-rw-r--r--  Documentation/ABI/testing/procfs-smaps_rollup | 14
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 10
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 19
-rw-r--r--  Documentation/core-api/kernel-api.rst | 2
-rw-r--r--  Documentation/dev-tools/kmemleak.rst | 48
-rw-r--r--  Documentation/filesystems/proc.txt | 41
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/alpha/include/asm/pgalloc.h | 40
-rw-r--r--  arch/arm/Kconfig | 8
-rw-r--r--  arch/arm/include/asm/pgalloc.h | 41
-rw-r--r--  arch/arm/kernel/efi.c | 3
-rw-r--r--  arch/arm/mm/dma-mapping.c | 3
-rw-r--r--  arch/arm/mm/mmu.c | 2
-rw-r--r--  arch/arm/mm/pageattr.c | 3
-rw-r--r--  arch/arm64/Kconfig | 4
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 47
-rw-r--r--  arch/arm64/kernel/efi.c | 3
-rw-r--r--  arch/arm64/kernel/setup.c | 5
-rw-r--r--  arch/arm64/kernel/smp.c | 5
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/arm64/mm/pageattr.c | 3
-rw-r--r--  arch/arm64/mm/pgd.c | 6
-rw-r--r--  arch/csky/include/asm/pgalloc.h | 30
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 41
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 33
-rw-r--r--  arch/mips/include/asm/pgtable.h | 3
-rw-r--r--  arch/mips/mm/Makefile | 1
-rw-r--r--  arch/mips/mm/gup.c | 303
-rw-r--r--  arch/nds32/include/asm/pgalloc.h | 31
-rw-r--r--  arch/nios2/include/asm/pgalloc.h | 37
-rw-r--r--  arch/parisc/include/asm/pgalloc.h | 33
-rw-r--r--  arch/powerpc/Kconfig | 3
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 14
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 72
-rw-r--r--  arch/riscv/include/asm/pgalloc.h | 29
-rw-r--r--  arch/s390/Kconfig | 5
-rw-r--r--  arch/s390/include/asm/pgtable.h | 8
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/configs/hp6xx_defconfig | 1
-rw-r--r--  arch/sh/configs/sdk7786_defconfig | 1
-rw-r--r--  arch/sh/configs/sh2007_defconfig | 1
-rw-r--r--  arch/sh/include/asm/io.h | 6
-rw-r--r--  arch/sh/include/asm/pgtable-3level.h | 3
-rw-r--r--  arch/sh/include/asm/pgtable.h | 37
-rw-r--r--  arch/sh/mm/Makefile | 2
-rw-r--r--  arch/sh/mm/gup.c | 277
-rw-r--r--  arch/sparc/Kconfig | 4
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 43
-rw-r--r--  arch/sparc/mm/Makefile | 2
-rw-r--r--  arch/sparc/mm/gup.c | 340
-rw-r--r--  arch/um/include/asm/pgalloc.h | 16
-rw-r--r--  arch/um/kernel/mem.c | 22
-rw-r--r--  arch/unicore32/include/asm/pgalloc.h | 36
-rw-r--r--  arch/unicore32/include/mach/regs-gpio.h | 2
-rw-r--r--  arch/x86/Kconfig | 5
-rw-r--r--  arch/x86/ia32/ia32_signal.c | 2
-rw-r--r--  arch/x86/include/asm/bitops.h | 189
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 19
-rw-r--r--  arch/x86/include/asm/pgtable-3level.h | 47
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 8
-rw-r--r--  arch/x86/kernel/signal.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 2
-rw-r--r--  arch/x86/mm/pgtable.c | 33
-rw-r--r--  arch/x86/xen/mmu_pv.c | 3
-rw-r--r--  drivers/clk/renesas/r8a77470-cpg-mssr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_ioctl.c | 2
-rw-r--r--  drivers/misc/lkdtm/core.c | 5
-rw-r--r--  drivers/misc/lkdtm/heap.c | 72
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/wifi.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r--  drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c | 2
-rw-r--r--  drivers/xen/gntdev.c | 6
-rw-r--r--  drivers/xen/privcmd.c | 6
-rw-r--r--  drivers/xen/xlate_mmu.c | 3
-rw-r--r--  fs/9p/vfs_addr.c | 6
-rw-r--r--  fs/jffs2/file.c | 4
-rw-r--r--  fs/jffs2/fs.c | 2
-rw-r--r--  fs/jffs2/os-linux.h | 2
-rw-r--r--  fs/notify/fanotify/fanotify.c | 5
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 8
-rw-r--r--  fs/ocfs2/alloc.c | 10
-rw-r--r--  fs/ocfs2/blockcheck.c | 56
-rw-r--r--  fs/ocfs2/blockcheck.h | 7
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 102
-rw-r--r--  fs/ocfs2/cluster/heartbeat.h | 2
-rw-r--r--  fs/ocfs2/cluster/netdebug.c | 39
-rw-r--r--  fs/ocfs2/cluster/nodemanager.c | 4
-rw-r--r--  fs/ocfs2/cluster/quorum.c | 2
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 5
-rw-r--r--  fs/ocfs2/cluster/tcp.h | 5
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 44
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.h | 10
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 8
-rw-r--r--  fs/ocfs2/dlmglue.c | 96
-rw-r--r--  fs/ocfs2/localalloc.c | 6
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  fs/ocfs2/super.c | 29
-rw-r--r--  fs/proc/base.c | 31
-rw-r--r--  fs/proc/meminfo.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 115
-rw-r--r--  fs/proc/task_nommu.c | 6
-rw-r--r--  include/asm-generic/bitops-instrumented.h | 263
-rw-r--r--  include/asm-generic/pgalloc.h | 107
-rw-r--r--  include/linux/dmar.h | 14
-rw-r--r--  include/linux/hugetlb.h | 120
-rw-r--r--  include/linux/kasan-checks.h | 43
-rw-r--r--  include/linux/kasan.h | 7
-rw-r--r--  include/linux/memcontrol.h | 24
-rw-r--r--  include/linux/mm.h | 57
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/oom.h | 1
-rw-r--r--  include/linux/page-flags.h | 6
-rw-r--r--  include/linux/page-isolation.h | 2
-rw-r--r--  include/linux/page_ext.h | 1
-rw-r--r--  include/linux/pagemap.h | 6
-rw-r--r--  include/linux/pfn_t.h | 7
-rw-r--r--  include/linux/slab.h | 16
-rw-r--r--  include/linux/swap.h | 18
-rw-r--r--  include/linux/vmalloc.h | 2
-rw-r--r--  include/linux/vmpressure.h | 2
-rw-r--r--  include/uapi/linux/nilfs2_ondisk.h | 24
-rw-r--r--  init/main.c | 24
-rw-r--r--  kernel/iomem.c | 2
-rw-r--r--  lib/test_kasan.c | 98
-rw-r--r--  mm/Kconfig | 16
-rw-r--r--  mm/Kconfig.debug | 14
-rw-r--r--  mm/Makefile | 4
-rw-r--r--  mm/dmapool.c | 4
-rw-r--r--  mm/failslab.c | 3
-rw-r--r--  mm/filemap.c | 19
-rw-r--r--  mm/gup.c | 674
-rw-r--r--  mm/kasan/common.c | 14
-rw-r--r--  mm/kasan/generic.c | 13
-rw-r--r--  mm/kasan/kasan.h | 15
-rw-r--r--  mm/kasan/report.c | 165
-rw-r--r--  mm/kasan/tags.c | 12
-rw-r--r--  mm/kmemleak.c | 4
-rw-r--r--  mm/list_lru.c | 3
-rw-r--r--  mm/memcontrol.c | 461
-rw-r--r--  mm/memory-failure.c | 2
-rw-r--r--  mm/memory.c | 15
-rw-r--r--  mm/mincore.c | 12
-rw-r--r--  mm/mmu_notifier.c | 2
-rw-r--r--  mm/nommu.c | 91
-rw-r--r--  mm/oom_kill.c | 131
-rw-r--r--  mm/page-writeback.c | 1
-rw-r--r--  mm/page_alloc.c | 234
-rw-r--r--  mm/page_ext.c | 3
-rw-r--r--  mm/page_io.c | 2
-rw-r--r--  mm/page_isolation.c | 3
-rw-r--r--  mm/slab.c | 79
-rw-r--r--  mm/slab.h | 199
-rw-r--r--  mm/slab_common.c | 269
-rw-r--r--  mm/slob.c | 4
-rw-r--r--  mm/slub.c | 86
-rw-r--r--  mm/swap_state.c | 49
-rw-r--r--  mm/swapfile.c | 291
-rw-r--r--  mm/util.c | 47
-rw-r--r--  mm/vmalloc.c | 108
-rw-r--r--  mm/vmscan.c | 74
-rw-r--r--  mm/z3fold.c | 12
-rw-r--r--  net/core/sock.c | 2
-rwxr-xr-x  scripts/decode_stacktrace.sh | 4
-rw-r--r--  scripts/spelling.txt | 36
-rw-r--r--  security/Kconfig.hardening | 29
-rw-r--r--  tools/vm/slabinfo.c | 118
-rw-r--r--  virt/kvm/arm/mmu.c | 2
173 files changed, 3586 insertions, 3381 deletions
diff --git a/Documentation/ABI/testing/procfs-smaps_rollup b/Documentation/ABI/testing/procfs-smaps_rollup
index 0a54ed0d63c9..274df44d8b1b 100644
--- a/Documentation/ABI/testing/procfs-smaps_rollup
+++ b/Documentation/ABI/testing/procfs-smaps_rollup
@@ -3,18 +3,28 @@ Date: August 2017
Contact: Daniel Colascione <dancol@google.com>
Description:
This file provides pre-summed memory information for a
- process. The format is identical to /proc/pid/smaps,
+ process. The format is almost identical to /proc/pid/smaps,
except instead of an entry for each VMA in a process,
smaps_rollup has a single entry (tagged "[rollup]")
for which each field is the sum of the corresponding
fields from all the maps in /proc/pid/smaps.
- For more details, see the procfs man page.
+ Additionally, the fields Pss_Anon, Pss_File and Pss_Shmem
+ are not present in /proc/pid/smaps. These fields represent
+ the sum of the Pss field of each type (anon, file, shmem).
+ For more details, see Documentation/filesystems/proc.txt
+ and the procfs man page.
Typical output looks like this:
00100000-ff709000 ---p 00000000 00:00 0 [rollup]
+ Size: 1192 kB
+ KernelPageSize: 4 kB
+ MMUPageSize: 4 kB
Rss: 884 kB
Pss: 385 kB
+ Pss_Anon: 301 kB
+ Pss_File: 80 kB
+ Pss_Shmem: 4 kB
Shared_Clean: 696 kB
Shared_Dirty: 0 kB
Private_Clean: 120 kB
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index a5c845338d6d..a9548de56ac9 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1146,6 +1146,11 @@ PAGE_SIZE multiple when read back.
otherwise, a value change in this file generates a file
modified event.
+ Note that all fields in this file are hierarchical and the
+ file modified event can be generated due to an event down the
+ hierarchy. For the local events at the cgroup level, see
+ memory.events.local.
+
low
The number of times the cgroup is reclaimed due to
high memory pressure even though its usage is under
@@ -1185,6 +1190,11 @@ PAGE_SIZE multiple when read back.
The number of processes belonging to this cgroup
killed by any kind of OOM killer.
+ memory.events.local
+ Similar to memory.events but the fields in the file are local
+ to the cgroup i.e. not hierarchical. The file modified event
+ generated on this file reflects only the local events.
+
memory.stat
A read-only flat-keyed file which exists on non-root cgroups.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f1c433daef6b..099c5a4be95b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -805,12 +805,10 @@
tracking down these problems.
debug_pagealloc=
- [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
- parameter enables the feature at boot time. In
- default, it is disabled. We can avoid allocating huge
- chunk of memory for debug pagealloc if we don't enable
- it at boot time and the system will work mostly same
- with the kernel built without CONFIG_DEBUG_PAGEALLOC.
+ [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this parameter
+ enables the feature at boot time. By default, it is
+ disabled and the system will work mostly the same as a
+ kernel built without CONFIG_DEBUG_PAGEALLOC.
on: enable the feature
debugpat [X86] Enable PAT debugging
@@ -1670,6 +1668,15 @@
initrd= [BOOT] Specify the location of the initial ramdisk
+ init_on_alloc= [MM] Fill newly allocated pages and heap objects with
+ zeroes.
+ Format: 0 | 1
+ Default set by CONFIG_INIT_ON_ALLOC_DEFAULT_ON.
+
+ init_on_free= [MM] Fill freed pages and heap objects with zeroes.
+ Format: 0 | 1
+ Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
+
init_pkru= [x86] Specify the default memory protection keys rights
register contents for all processes. 0x55555554 by
default (disallow access to all but pkey 0). Can
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 824f24ccf401..08af5caf036d 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -54,7 +54,7 @@ The Linux kernel provides more basic utility functions.
Bit Operations
--------------
-.. kernel-doc:: arch/x86/include/asm/bitops.h
+.. kernel-doc:: include/asm-generic/bitops-instrumented.h
:internal:
Bitmap Operations
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index e6f51260ff32..3621cd5e1eef 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -2,8 +2,8 @@ Kernel Memory Leak Detector
===========================
Kmemleak provides a way of detecting possible kernel memory leaks in a
-way similar to a tracing garbage collector
-(https://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29#Tracing_garbage_collectors),
+way similar to a `tracing garbage collector
+<https://en.wikipedia.org/wiki/Tracing_garbage_collection>`_,
with the difference that the orphan objects are not freed but only
reported via /sys/kernel/debug/kmemleak. A similar method is used by the
Valgrind tool (``memcheck --leak-check``) to detect the memory leaks in
@@ -15,10 +15,13 @@ Usage
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
thread scans the memory every 10 minutes (by default) and prints the
-number of new unreferenced objects found. To display the details of all
-the possible memory leaks::
+number of new unreferenced objects found. If ``debugfs`` isn't already
+mounted, mount with::
# mount -t debugfs nodev /sys/kernel/debug/
+
+To display the details of all the possible scanned memory leaks::
+
# cat /sys/kernel/debug/kmemleak
To trigger an intermediate memory scan::
@@ -72,6 +75,9 @@ If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF are enabled, the kmemleak is
disabled by default. Passing ``kmemleak=on`` on the kernel command
line enables the function.
+If you are getting errors like "Error while writing to stdout" or "write_loop:
+Invalid argument", make sure kmemleak is properly enabled.
+
Basic Algorithm
---------------
@@ -218,3 +224,37 @@ the pointer is calculated by other methods than the usual container_of
macro or the pointer is stored in a location not scanned by kmemleak.
Page allocations and ioremap are not tracked.
+
+Testing with kmemleak-test
+--------------------------
+
+To check if you have everything set up to use kmemleak, you can use the
+kmemleak-test module, a module that deliberately leaks memory. Set
+CONFIG_DEBUG_KMEMLEAK_TEST as a module (it can't be used as built-in) and boot
+the kernel with kmemleak enabled. Load the module and perform a scan with::
+
+ # modprobe kmemleak-test
+ # echo scan > /sys/kernel/debug/kmemleak
+
+Note that you may not get results instantly or on the first scan. When
+kmemleak gets results, it'll log ``kmemleak: <count of leaks> new suspected
+memory leaks``. Then read the file to see them::
+
+ # cat /sys/kernel/debug/kmemleak
+ unreferenced object 0xffff89862ca702e8 (size 32):
+ comm "modprobe", pid 2088, jiffies 4294680594 (age 375.486s)
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
+ backtrace:
+ [<00000000e0a73ec7>] 0xffffffffc01d2036
+ [<000000000c5d2a46>] do_one_initcall+0x41/0x1df
+ [<0000000046db7e0a>] do_init_module+0x55/0x200
+ [<00000000542b9814>] load_module+0x203c/0x2480
+ [<00000000c2850256>] __do_sys_finit_module+0xba/0xe0
+ [<000000006564e7ef>] do_syscall_64+0x43/0x110
+ [<000000007c873fa6>] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ ...
+
+Removing the module with ``rmmod kmemleak_test`` should also trigger some
+kmemleak results.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index a226061fa109..d750b6926899 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -154,9 +154,11 @@ Table 1-1: Process specific entries in /proc
symbol the task is blocked in - or "0" if not blocked.
pagemap Page table
stack Report full stack trace, enable via CONFIG_STACKTRACE
- smaps an extension based on maps, showing the memory consumption of
+ smaps An extension based on maps, showing the memory consumption of
each mapping and flags associated with it
- numa_maps an extension based on maps, showing the memory locality and
+ smaps_rollup Accumulated smaps stats for all mappings of the process. This
+ can be derived from smaps, but is faster and more convenient
+ numa_maps An extension based on maps, showing the memory locality and
binding policy as well as mem usage (in pages) of each mapping.
..............................................................................
@@ -366,7 +368,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
exit_code the thread's exit_code in the form reported by the waitpid system call
..............................................................................
-The /proc/PID/maps file containing the currently mapped memory regions and
+The /proc/PID/maps file contains the currently mapped memory regions and
their access permissions.
The format is:
@@ -417,11 +419,14 @@ is not associated with a file:
or if empty, the mapping is anonymous.
The /proc/PID/smaps is an extension based on maps, showing the memory
-consumption for each of the process's mappings. For each of mappings there
-is a series of lines such as the following:
+consumption for each of the process's mappings. For each mapping (aka Virtual
+Memory Area, or VMA) there is a series of lines such as the following:
08048000-080bc000 r-xp 00000000 03:02 13130 /bin/bash
+
Size: 1084 kB
+KernelPageSize: 4 kB
+MMUPageSize: 4 kB
Rss: 892 kB
Pss: 374 kB
Shared_Clean: 892 kB
@@ -443,11 +448,14 @@ Locked: 0 kB
THPeligible: 0
VmFlags: rd ex mr mw me dw
-the first of these lines shows the same information as is displayed for the
-mapping in /proc/PID/maps. The remaining lines show the size of the mapping
-(size), the amount of the mapping that is currently resident in RAM (RSS), the
-process' proportional share of this mapping (PSS), the number of clean and
-dirty private pages in the mapping.
+The first of these lines shows the same information as is displayed for the
+mapping in /proc/PID/maps. Following lines show the size of the mapping
+(size); the size of each page allocated when backing a VMA (KernelPageSize),
+which is usually the same as the size in the page table entries; the page size
+used by the MMU when backing a VMA (in most cases, the same as KernelPageSize);
+the amount of the mapping that is currently resident in RAM (RSS); the
+process' proportional share of this mapping (PSS); and the number of clean and
+dirty shared and private pages in the mapping.
The "proportional set size" (PSS) of a process is the count of pages it has
in memory, where each page is divided by the number of processes sharing it.
@@ -532,6 +540,19 @@ guarantees:
2) If there is something at a given vaddr during the entirety of the
life of the smaps/maps walk, there will be some output for it.
+The /proc/PID/smaps_rollup file includes the same fields as /proc/PID/smaps,
+but their values are the sums of the corresponding values for all mappings of
+the process. Additionally, it contains these fields:
+
+Pss_Anon
+Pss_File
+Pss_Shmem
+
+They represent the proportional shares of anonymous, file, and shmem pages, as
+described for smaps above. These fields are omitted in smaps since each
+mapping identifies the type (anon, file, or shmem) of all pages it contains.
+Thus all information in smaps_rollup can be derived from smaps, but at a
+significantly higher cost.
The /proc/PID/clear_refs is used to reset the PG_Referenced and ACCESSED/YOUNG
bits on both physical and virtual pages associated with a process, and the
diff --git a/MAINTAINERS b/MAINTAINERS
index 133ec3cc11ae..60218e6a60ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11260,7 +11260,7 @@ F: include/uapi/linux/nfs*
F: include/uapi/linux/sunrpc/
NILFS2 FILESYSTEM
-M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
+M: Ryusuke Konishi <konishi.ryusuke@gmail.com>
L: linux-nilfs@vger.kernel.org
W: https://nilfs.sourceforge.io/
W: https://nilfs.osdn.jp/
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index 02f9f91bb4f0..71ded3b7d82d 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -5,6 +5,8 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
@@ -41,7 +43,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pmd_t *
pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+ pmd_t *ret = (pmd_t *)__get_free_page(GFP_PGTABLE_USER);
return ret;
}
@@ -51,42 +53,6 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
free_page((unsigned long)pmd);
}
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm)
-{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- return pte;
-}
-
-static inline void
-pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
-{
- pte_t *pte = pte_alloc_one_kernel(mm);
- struct page *page;
-
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return page;
-}
-
-static inline void
-pte_free(struct mm_struct *mm, pgtable_t page)
-{
- pgtable_page_dtor(page);
- __free_page(page);
-}
-
#define check_pgt_cache() do { } while (0)
#endif /* _ALPHA_PGALLOC_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d850feb5cc0a..2bf1ce39a96d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -75,6 +75,7 @@ config ARM
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
@@ -1622,16 +1623,9 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool ARCH_SPARSEMEM_ENABLE
-config ARCH_SELECT_MEMORY_MODEL
- def_bool ARCH_SPARSEMEM_ENABLE
-
config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
-config HAVE_GENERIC_GUP
- def_bool y
- depends on ARM_LPAE
-
config HIGHMEM
bool "High Memory Support"
depends on MMU
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index c038cff6fdd3..a2a68b751971 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -54,8 +54,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
-
static inline void clean_pte_table(pte_t *pte)
{
clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
@@ -77,54 +75,41 @@ static inline void clean_pte_table(pte_t *pte)
* | h/w pt 1 |
* +------------+
*/
+
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm)
{
- pte_t *pte;
+ pte_t *pte = __pte_alloc_one_kernel(mm);
- pte = (pte_t *)__get_free_page(PGALLOC_GFP);
if (pte)
clean_pte_table(pte);
return pte;
}
+#ifdef CONFIG_HIGHPTE
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
+#else
+#define PGTABLE_HIGHMEM 0
+#endif
+
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
-#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
-#else
- pte = alloc_pages(PGALLOC_GFP, 0);
-#endif
+ pte = __pte_alloc_one(mm, GFP_PGTABLE_USER | PGTABLE_HIGHMEM);
if (!pte)
return NULL;
if (!PageHighMem(pte))
clean_pte_table(page_address(pte));
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
return pte;
}
-/*
- * Free one PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- if (pte)
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
pmdval_t prot)
{
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index ed005870671a..e57dbcc89123 100644
--- a/arch/arm/kernel/efi.c
+++ b/arch/arm/kernel/efi.c
@@ -8,8 +8,7 @@
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
-static int __init set_permissions(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
+static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
pte_t pte = *ptep;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 439bb6a59a04..1fb5c0ca1ed8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -493,8 +493,7 @@ void __init dma_contiguous_remap(void)
}
}
-static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
- void *data)
+static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
struct page *page = virt_to_page(addr);
pgprot_t prot = *(pgprot_t *)data;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1aa2586fa597..d9a0038774a6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -729,7 +729,7 @@ static void __init *early_alloc(unsigned long sz)
static void *__init late_alloc(unsigned long sz)
{
- void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
+ void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
BUG();
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 0f5faf30d9bf..d546efad7e97 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -14,8 +14,7 @@ struct page_change_data {
pgprot_t clear_mask;
};
-static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
pte_t pte = *ptep;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c085aec9459b..a36ff61321ce 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -143,6 +143,7 @@ config ARM64
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
@@ -267,9 +268,6 @@ config ZONE_DMA32
bool "Support DMA32 zone" if EXPERT
default y
-config HAVE_GENERIC_GUP
- def_bool y
-
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index cdced518378d..14d0bc44d451 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -13,18 +13,23 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
#define check_pgt_cache() do { } while (0)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
+ gfp_t gfp = GFP_PGTABLE_USER;
struct page *page;
- page = alloc_page(PGALLOC_GFP);
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+
+ page = alloc_page(gfp);
if (!page)
return NULL;
if (!pgtable_pmd_page_ctor(page)) {
@@ -61,7 +66,7 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pud_t *)__get_free_page(PGALLOC_GFP);
+ return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
@@ -89,42 +94,6 @@ static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return (pte_t *)__get_free_page(PGALLOC_GFP);
-}
-
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
-{
- struct page *pte;
-
- pte = alloc_pages(PGALLOC_GFP, 0);
- if (!pte)
- return NULL;
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
- return pte;
-}
-
-/*
- * Free a PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
-{
- if (ptep)
- free_page((unsigned long)ptep);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
pmdval_t prot)
{
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 3c33d0dd8e0e..d0cf596db82c 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -82,8 +82,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
return 0;
}
-static int __init set_permissions(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
+static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
pte_t pte = READ_ONCE(*ptep);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7e541f947b4c..9c4bad7d7131 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -283,6 +283,11 @@ void __init setup_arch(char **cmdline_p)
setup_machine_fdt(__fdt_pointer);
+ /*
+ * Initialise the static keys early as they may be enabled by the
+ * cpufeature code and early parameters.
+ */
+ jump_label_init();
parse_early_param();
/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 9286ee6749e8..ea90d3bd9253 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -420,11 +420,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
- /*
- * Initialise the static keys early as they may be enabled by the
- * cpufeature code.
- */
- jump_label_init();
cpuinfo_store_boot_cpu();
/*
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3645f29bd814..1b49c08dfa2b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,7 +362,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
- void *ptr = (void *)__get_free_page(PGALLOC_GFP);
+ void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
BUG_ON(!ptr);
/* Ensure the zeroed page is visible to the page table walker */
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index fcdcf6cd7677..03c53f16ee77 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -19,8 +19,7 @@ struct page_change_data {
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
-static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
+static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
struct page_change_data *cdata = data;
pte_t pte = READ_ONCE(*ptep);
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 9a0c7d5090d6..7548f9ca1f11 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -19,10 +19,12 @@ static struct kmem_cache *pgd_cache __ro_after_init;
pgd_t *pgd_alloc(struct mm_struct *mm)
{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
if (PGD_SIZE == PAGE_SIZE)
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ return (pgd_t *)__get_free_page(gfp);
else
- return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+ return kmem_cache_alloc(pgd_cache, gfp);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index d213bb47b717..98c5716708d6 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -8,6 +8,9 @@
#include <linux/mm.h>
#include <linux/sched.h>
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
@@ -39,33 +42,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return pte;
}
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
- if (!pte)
- return NULL;
-
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
-
- return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_pages((unsigned long)pte, PTE_ORDER);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_pages(pte, PTE_ORDER);
-}
-
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGD_ORDER);
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 1456c5eecbd9..1a8ddbd0d23c 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -13,55 +13,18 @@
#include <asm/tlb.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
extern const char bad_pmd_string[];
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t page)
-{
- pgtable_page_dtor(page);
- __free_page(page);
-}
-
#define __pte_free_tlb(tlb,pte,addr) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- unsigned long page = __get_free_page(GFP_KERNEL);
-
- if (!page)
- return NULL;
-
- memset((void *)page, 0, PAGE_SIZE);
- return (pte_t *) (page);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *page = alloc_pages(GFP_KERNEL, 0);
-
- if (page == NULL)
- return NULL;
-
- clear_highpage(page);
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return page;
-
-}
-
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
pmd_val(*pmd) = __pa((unsigned long)pte);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 70d3200476bf..7957d3457156 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -34,6 +34,7 @@ config MIPS
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GUP_GET_PTE_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_JUMP_LABEL
@@ -52,6 +53,7 @@ config MIPS
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 27808d9461f4..aa16b85ddffc 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -13,6 +13,8 @@
#include <linux/mm.h>
#include <linux/sched.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
@@ -50,37 +52,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_pages((unsigned long)pgd, PGD_ORDER);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
- if (!pte)
- return NULL;
- clear_highpage(pte);
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
- return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_pages((unsigned long)pte, PTE_ORDER);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_pages(pte, PTE_ORDER);
-}
-
#define __pte_free_tlb(tlb,pte,address) \
do { \
pgtable_page_dtor(pte); \
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 4ccb465ef3f2..7d27194e3b45 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,7 @@
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
+#include <asm/cpu-features.h>
struct mm_struct;
struct vm_area_struct;
@@ -626,6 +627,8 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
+
#include <asm-generic/pgtable.h>
/*
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index f34d7ff5eb60..1e8d335025d7 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -7,7 +7,6 @@ obj-y += cache.o
obj-y += context.o
obj-y += extable.o
obj-y += fault.o
-obj-y += gup.o
obj-y += init.o
obj-y += mmap.o
obj-y += page.o
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
deleted file mode 100644
index 4c2b4483683c..000000000000
--- a/arch/mips/mm/gup.c
+++ /dev/null
@@ -1,303 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for MIPS
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- * Copyright (C) 2011 Ralf Baechle
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/hugetlb.h>
-
-#include <asm/cpu-features.h>
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
- pte_t pte;
-
-retry:
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- if (unlikely(pte.pte_low != ptep->pte_low))
- goto retry;
-
- return pte;
-#else
- return READ_ONCE(*ptep);
-#endif
-}
-
-static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t *ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
- struct page *page;
-
- if (!pte_present(pte) ||
- pte_special(pte) || (write && !pte_write(pte))) {
- pte_unmap(ptep);
- return 0;
- }
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- get_page(page);
- SetPageReferenced(page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- pte_unmap(ptep - 1);
- return 1;
-}
-
-static inline void get_head_page_multiple(struct page *page, int nr)
-{
- VM_BUG_ON(page != compound_head(page));
- VM_BUG_ON(page_count(page) == 0);
- page_ref_add(page, nr);
- SetPageReferenced(page);
-}
-
-static int gup_huge_pmd(pmd_t pmd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t pte = *(pte_t *)&pmd;
- struct page *head, *page;
- int refs;
-
- if (write && !pte_write(pte))
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(pte_special(pte));
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- get_head_page_multiple(head, refs);
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_huge(pmd))) {
- if (!gup_huge_pmd(pmd, addr, next, write, pages,nr))
- return 0;
- } else {
- if (!gup_pte_range(pmd, addr, next, write, pages,nr))
- return 0;
- }
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- pte_t pte = *(pte_t *)&pud;
- struct page *head, *page;
- int refs;
-
- if (write && !pte_write(pte))
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(pte_special(pte));
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- get_head_page_multiple(head, refs);
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_huge(pud))) {
- if (!gup_huge_pud(pud, addr, next, write, pages,nr))
- return 0;
- } else {
- if (!gup_pmd_range(pud, addr, next, write, pages,nr))
- return 0;
- }
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (unlikely(!access_ok((void __user *)start, len)))
- return 0;
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch
- * size will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the page and take a ref on it.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int ret, nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
-
- end = start + len;
- if (end < start || cpu_has_dc_aliases)
- goto slow_irqon;
-
- /* XXX: batch / limit 'nr' */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
- pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-slow:
- local_irq_enable();
-
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
- pages, gup_flags);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
- return ret;
-}
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index 3cbc749c79aa..e78b43d8389f 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -9,6 +9,9 @@
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
/*
* Since we have only two-level page tables, these are trivial
*/
@@ -22,22 +25,11 @@ extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
#define check_pgt_cache() do { } while (0)
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- pte_t *pte;
-
- pte =
- (pte_t *) __get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_ZERO);
-
- return pte;
-}
-
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
pgtable_t pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO, 0);
+ pte = __pte_alloc_one(mm, GFP_PGTABLE_USER);
if (pte)
cpu_dcache_wb_page((unsigned long)page_address(pte));
@@ -45,21 +37,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
}
/*
- * Free one PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t * pte)
-{
- if (pte) {
- free_page((unsigned long)pte);
- }
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- __free_page(pte);
-}
-
-/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 3a149ead1207..4bc8cf72067e 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -12,6 +12,8 @@
#include <linux/mm.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
{
@@ -37,41 +39,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_pages((unsigned long)pgd, PGD_ORDER);
}
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
-
- return pte;
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *pte;
-
- pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
- if (pte) {
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
- clear_highpage(pte);
- }
- return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_pages((unsigned long)pte, PTE_ORDER);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
- pgtable_page_dtor(pte);
- __free_pages(pte, PTE_ORDER);
-}
-
#define __pte_free_tlb(tlb, pte, addr) \
do { \
pgtable_page_dtor(pte); \
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index ea75cc966dae..4f2059a50fae 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
#include <asm/cache.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
/* Allocate the top level pgd (page directory)
*
* Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
@@ -122,37 +124,6 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
-{
- struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!page)
- return NULL;
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return page;
-}
-
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm)
-{
- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
- pgtable_page_dtor(pte);
- pte_free_kernel(mm, page_address(pte));
-}
-
#define check_pgt_cache() do { } while (0)
#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3b795a0cab62..24a41f919309 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -125,6 +125,7 @@ config PPC
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
+ select ARCH_HAS_HUGEPD if HUGETLB_PAGE
select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
@@ -185,12 +186,12 @@ config PPC
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
select HAVE_EBPF_JIT if PPC64
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC
- select HAVE_GENERIC_GUP
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 3f53be60fb01..64145751b2fd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -140,6 +140,20 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
}
#endif
+#ifdef CONFIG_PPC64
+#define is_ioremap_addr is_ioremap_addr
+static inline bool is_ioremap_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= IOREMAP_BASE && addr < IOREMAP_END;
+#else
+ return false;
+#endif
+}
+#endif /* CONFIG_PPC64 */
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index e8276161872e..381bf8dea193 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -827,7 +827,7 @@ static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
*
* Note: If EOI is incorrectly used by SW to lower the CPPR
* value (ie more favored), we do not check for rejection of
- * a pending interrupt, this is a SW error and PAPR sepcifies
+ * a pending interrupt, this is a SW error and PAPR specifies
* that we don't have to deal with it.
*
* The sending of an EOI to the ICS is handled after the
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b5d92dc32844..51716c11d0fb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -511,13 +511,6 @@ retry:
return page;
}
-static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
- unsigned long sz)
-{
- unsigned long __boundary = (addr + sz) & ~(sz-1);
- return (__boundary - 1 < end - 1) ? __boundary : end;
-}
-
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
@@ -665,68 +658,3 @@ void flush_dcache_icache_hugepage(struct page *page)
}
}
}
-
-static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long pte_end;
- struct page *head, *page;
- pte_t pte;
- int refs;
-
- pte_end = (addr + sz) & ~(sz-1);
- if (pte_end < end)
- end = pte_end;
-
- pte = READ_ONCE(*ptep);
-
- if (!pte_access_permitted(pte, write))
- return 0;
-
- /* hugepages are never "special" */
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
-
- page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- /* Could be optimized better */
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- pte_t *ptep;
- unsigned long sz = 1UL << hugepd_shift(hugepd);
- unsigned long next;
-
- ptep = hugepte_offset(hugepd, addr, pdshift);
- do {
- next = hugepte_addr_end(addr, end, sz);
- if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
- return 0;
- } while (ptep++, addr = next, addr != end);
-
- return 1;
-}
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index eb8b0195f27f..56a67d66f72f 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
#include <linux/mm.h>
#include <asm/tlb.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
@@ -74,33 +76,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif /* __PAGETABLE_PMD_FOLDED */
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return (pte_t *)__get_free_page(
- GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
- struct page *pte;
-
- pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
- if (likely(pte != NULL))
- pgtable_page_ctor(pte);
- return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
#define __pte_free_tlb(tlb, pte, buf) \
do { \
pgtable_page_dtor(pte); \
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index fdb4246265a5..5d8570ed6cab 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -139,6 +139,7 @@ config S390
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_FAST_GUP
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FENTRY
select HAVE_FTRACE_MCOUNT_RECORD
@@ -146,7 +147,6 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_GUP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -641,9 +641,6 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
-config ARCH_SELECT_MEMORY_MODEL
- def_bool y
-
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y if SPARSEMEM
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9f0195d5fa16..9b274fcaacb6 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1270,14 +1270,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
- unsigned long len, end;
-
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (end < start)
- return false;
return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index c7c99e18d5ff..31a7d12db705 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -15,6 +15,7 @@ config SUPERH
select HAVE_ARCH_TRACEHOOK
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_FAST_GUP if MMU
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -64,6 +65,7 @@ config SUPERH
config SUPERH32
def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
+ select GUP_GET_PTE_LOW_HIGH if X2TLB
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_IOREMAP_PROT if MMU && !X2TLB
diff --git a/arch/sh/configs/hp6xx_defconfig b/arch/sh/configs/hp6xx_defconfig
index 4dcf7f552582..91d43e2bffea 100644
--- a/arch/sh/configs/hp6xx_defconfig
+++ b/arch/sh/configs/hp6xx_defconfig
@@ -40,7 +40,6 @@ CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_HIT=y
CONFIG_FB_SH_MOBILE_LCDC=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_PEARL_8x8=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 5209889765ad..49a29338789b 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -191,7 +191,6 @@ CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=m
CONFIG_JFFS2_FS_XATTR=y
CONFIG_UBIFS_FS=m
-CONFIG_LOGFS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_ROMFS_FS=m
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
index a1cf6447dbb1..cbd6742eb423 100644
--- a/arch/sh/configs/sh2007_defconfig
+++ b/arch/sh/configs/sh2007_defconfig
@@ -85,7 +85,6 @@ CONFIG_WATCHDOG=y
CONFIG_SH_WDT=y
CONFIG_SSB=y
CONFIG_FB=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index c28e37a344ad..ac0561960c52 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -369,7 +369,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#define ioremap_nocache ioremap
#define ioremap_uc ioremap
-#define iounmap __iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+ __iounmap(addr);
+}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 7d8587eb65ff..779260b721ca 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -38,6 +38,9 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
return pud_val(pud);
}
+/* only used by the stubbed out hugetlb gup code, should never be called */
+#define pud_page(pud) NULL
+
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 3587103afe59..9085d1142fa3 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -149,6 +149,43 @@ extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
pgd_t *pgd);
+static inline bool __pte_access_permitted(pte_t pte, u64 prot)
+{
+ return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
+}
+
+#ifdef CONFIG_X2TLB
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot = _PAGE_PRESENT;
+
+ prot |= _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
+ if (write)
+ prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
+ return __pte_access_permitted(pte, prot);
+}
+#elif defined(CONFIG_SUPERH64)
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
+
+ if (write)
+ prot |= _PAGE_WRITE;
+ return __pte_access_permitted(pte, prot);
+}
+#else
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot = _PAGE_PRESENT | _PAGE_USER;
+
+ if (write)
+ prot |= _PAGE_RW;
+ return __pte_access_permitted(pte, prot);
+}
+#endif
+
+#define pte_access_permitted pte_access_permitted
+
/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
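
With the SuperH private gup.c removed below, the permission test that used to be an open-coded mask/result comparison moves into pte_access_permitted(), which the generic fast-GUP walker consults for every PTE. A rough sketch of the consuming side (assumed shape of the generic PTE loop, not verbatim mm/gup.c):

	/* Hedged sketch of how generic fast GUP is expected to use the hook. */
	static int gup_pte_range_sketch(pmd_t pmd, unsigned long addr,
					unsigned long end, unsigned int flags,
					struct page **pages, int *nr)
	{
		pte_t *ptep = pte_offset_map(&pmd, addr);

		do {
			pte_t pte = gup_get_pte(ptep);
			struct page *page;

			if (!pte_access_permitted(pte, flags & FOLL_WRITE)) {
				pte_unmap(ptep);
				return 0;
			}
			page = pte_page(pte);
			get_page(page);	/* the real code takes a speculative reference */
			pages[(*nr)++] = page;
		} while (ptep++, addr += PAGE_SIZE, addr != end);

		pte_unmap(ptep - 1);
		return 1;
	}
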
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index fbe5e79751b3..5051b38fd5b6 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -17,7 +17,7 @@ cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
-mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o gup.o ioremap.o kmap.o \
+mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o ioremap.o kmap.o \
pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
obj-y += $(mmu-y)
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
deleted file mode 100644
index 277c882f7489..000000000000
--- a/arch/sh/mm/gup.c
+++ /dev/null
@@ -1,277 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for SuperH
- *
- * Copyright (C) 2009 - 2010 Paul Mundt
- *
- * Cloned from the x86 and PowerPC versions, by:
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#ifndef CONFIG_X2TLB
- return READ_ONCE(*ptep);
-#else
- /*
- * With get_user_pages_fast, we walk down the pagetables without
- * taking any locks. For this we would like to load the pointers
- * atomically, but that is not possible with 64-bit PTEs. What
- * we do have is the guarantee that a pte will only either go
- * from not present to present, or present to not present or both
- * -- it will not switch to a completely different present page
- * without a TLB flush in between; something that we are blocking
- * by holding interrupts off.
- *
- * Setting ptes from not present to present goes:
- * ptep->pte_high = h;
- * smp_wmb();
- * ptep->pte_low = l;
- *
- * And present to not present goes:
- * ptep->pte_low = 0;
- * smp_wmb();
- * ptep->pte_high = 0;
- *
- * We must ensure here that the load of pte_low sees l iff pte_high
- * sees h. We load pte_high *after* loading pte_low, which ensures we
- * don't see an older value of pte_high. *Then* we recheck pte_low,
- * which ensures that we haven't picked up a changed pte high. We might
- * have got rubbish values from pte_low and pte_high, but we are
- * guaranteed that pte_low will not have the present bit set *unless*
- * it is 'l'. And get_user_pages_fast only operates on present ptes, so
- * we're safe.
- *
- * gup_get_pte should not be used or copied outside gup.c without being
- * very careful -- it does not atomically load the pte or anything that
- * is likely to be useful for you.
- */
- pte_t pte;
-
-retry:
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- if (unlikely(pte.pte_low != ptep->pte_low))
- goto retry;
-
- return pte;
-#endif
-}
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- u64 mask, result;
- pte_t *ptep;
-
-#ifdef CONFIG_X2TLB
- result = _PAGE_PRESENT | _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
- if (write)
- result |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
-#elif defined(CONFIG_SUPERH64)
- result = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
- if (write)
- result |= _PAGE_WRITE;
-#else
- result = _PAGE_PRESENT | _PAGE_USER;
- if (write)
- result |= _PAGE_RW;
-#endif
-
- mask = result | _PAGE_SPECIAL;
-
- ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
- struct page *page;
-
- if ((pte_val(pte) & mask) != result) {
- pte_unmap(ptep);
- return 0;
- }
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- get_page(page);
- __flush_anon_page(page, addr);
- flush_dcache_page(page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
- pte_unmap(ptep - 1);
-
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (!gup_pte_range(pmd, addr, next, write, pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (!gup_pmd_range(pud, addr, next, write, pages, nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (unlikely(!access_ok((void __user *)start, len)))
- return 0;
-
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
-
- end = start + len;
- if (end < start)
- goto slow_irqon;
-
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
- pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, pages,
- gup_flags);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
-}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 26ab6f5bbaaf..e9f5d62e9817 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -28,6 +28,7 @@ config SPARC
select RTC_DRV_M48T59
select RTC_SYSTOHC
select HAVE_ARCH_JUMP_LABEL if SPARC64
+ select HAVE_FAST_GUP if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
@@ -300,9 +301,6 @@ config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
-config ARCH_SELECT_MEMORY_MODEL
- def_bool y if SPARC64
-
config ARCH_SPARSEMEM_ENABLE
def_bool y if SPARC64
select SPARSEMEM_VMEMMAP_ENABLE
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 22500c3be7a9..1599de730532 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -864,6 +864,9 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
#define pgd_present(pgd) (pgd_val(pgd) != 0U)
#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+/* only used by the stubbed out hugetlb gup code, should never be called */
+#define pgd_page(pgd) NULL
+
static inline unsigned long pud_large(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
@@ -1075,6 +1078,46 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
}
#define io_remap_pfn_range io_remap_pfn_range
+static inline unsigned long untagged_addr(unsigned long start)
+{
+ if (adi_capable()) {
+ long addr = start;
+
+ /* If userspace has passed a versioned address, kernel
+ * will not find it in the VMAs since it does not store
+ * the version tags in the list of VMAs. Storing version
+ * tags in list of VMAs is impractical since they can be
+ * changed any time from userspace without dropping into
+ * kernel. Any address search in VMAs will be done with
+ * non-versioned addresses. Ensure the ADI version bits
+ * are dropped here by sign extending the last bit before
+ * ADI bits. IOMMU does not implement version tags.
+ */
+ return (addr << (long)adi_nbits()) >> (long)adi_nbits();
+ }
+
+ return start;
+}
+#define untagged_addr untagged_addr
+
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ u64 prot;
+
+ if (tlb_type == hypervisor) {
+ prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
+ if (write)
+ prot |= _PAGE_WRITE_4V;
+ } else {
+ prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
+ if (write)
+ prot |= _PAGE_WRITE_4U;
+ }
+
+ return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
+}
+#define pte_access_permitted pte_access_permitted
+
#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>
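
untagged_addr() lets generic code strip SPARC ADI version tags before any VMA lookup, replacing the two open-coded copies in the deleted sparc gup.c below. The tag removal is plain sign extension from the bit just below the tag field; for illustration, with an assumed 4-bit tag width:

	/*
	 * Illustration only: drop a 4-bit tag in bits 63..60 by shifting it out
	 * and arithmetically shifting back (assumes adi_nbits() == 4 and the
	 * kernel's reliance on arithmetic right shift of signed longs).
	 */
	static inline unsigned long strip_adi_tag_example(unsigned long tagged)
	{
		long addr = (long)tagged;

		return (unsigned long)((addr << 4) >> 4);
	}
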
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index d39075b1e3b7..b078205b70e0 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -5,7 +5,7 @@
asflags-y := -ansi
ccflags-y := -Werror
-obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
deleted file mode 100644
index 1e770a517d4a..000000000000
--- a/arch/sparc/mm/gup.c
+++ /dev/null
@@ -1,340 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for sparc, cribbed from powerpc
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-#include <asm/adi.h>
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long mask, result;
- pte_t *ptep;
-
- if (tlb_type == hypervisor) {
- result = _PAGE_PRESENT_4V|_PAGE_P_4V;
- if (write)
- result |= _PAGE_WRITE_4V;
- } else {
- result = _PAGE_PRESENT_4U|_PAGE_P_4U;
- if (write)
- result |= _PAGE_WRITE_4U;
- }
- mask = result | _PAGE_SPECIAL;
-
- ptep = pte_offset_kernel(&pmd, addr);
- do {
- struct page *page, *head;
- pte_t pte = *ptep;
-
- if ((pte_val(pte) & mask) != result)
- return 0;
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- /* The hugepage case is simplified on sparc64 because
- * we encode the sub-page pfn offsets into the
- * hugepage PTEs. We could optimize this in the future
- * use page_cache_add_speculative() for the hugepage case.
- */
- page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- return 0;
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(head);
- return 0;
- }
-
- pages[*nr] = page;
- (*nr)++;
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages,
- int *nr)
-{
- struct page *head, *page;
- int refs;
-
- if (!(pmd_val(pmd) & _PAGE_VALID))
- return 0;
-
- if (write && !pmd_write(pmd))
- return 0;
-
- refs = 0;
- page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- head = compound_head(page);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages,
- int *nr)
-{
- struct page *head, *page;
- int refs;
-
- if (!(pud_val(pud) & _PAGE_VALID))
- return 0;
-
- if (write && !pud_write(pud))
- return 0;
-
- refs = 0;
- page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- head = compound_head(page);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pud_val(pud) != pud_val(*pudp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_large(pmd))) {
- if (!gup_huge_pmd(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pte_range(pmd, addr, next, write,
- pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&pgd, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_large(pud))) {
- if (!gup_huge_pud(pudp, pud, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next, flags;
- pgd_t *pgdp;
- int nr = 0;
-
-#ifdef CONFIG_SPARC64
- if (adi_capable()) {
- long addr = start;
-
- /* If userspace has passed a versioned address, kernel
- * will not find it in the VMAs since it does not store
- * the version tags in the list of VMAs. Storing version
- * tags in list of VMAs is impractical since they can be
- * changed any time from userspace without dropping into
- * kernel. Any address search in VMAs will be done with
- * non-versioned addresses. Ensure the ADI version bits
- * are dropped here by sign extending the last bit before
- * ADI bits. IOMMU does not implement version tags.
- */
- addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
- start = addr;
- }
-#endif
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-int get_user_pages_fast(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int nr = 0;
-
-#ifdef CONFIG_SPARC64
- if (adi_capable()) {
- long addr = start;
-
- /* If userspace has passed a versioned address, kernel
- * will not find it in the VMAs since it does not store
- * the version tags in the list of VMAs. Storing version
- * tags in list of VMAs is impractical since they can be
- * changed any time from userspace without dropping into
- * kernel. Any address search in VMAs will be done with
- * non-versioned addresses. Ensure the ADI version bits
- * are dropped here by sign extending the last bit before
- * ADI bits. IOMMU does not implements version tags,
- */
- addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
- start = addr;
- }
-#endif
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch size
- * will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables from being freed on sparc.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the the page and take a ref on it.
- */
- local_irq_disable();
-
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
- pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
-
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-
-slow:
- local_irq_enable();
-
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, pages,
- gup_flags);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
-}
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 99eb5682792a..d7b282e9c4d5 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
#include <linux/mm.h>
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
@@ -25,20 +27,6 @@
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
-extern pgtable_t pte_alloc_one(struct mm_struct *);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
#define __pte_free_tlb(tlb,pte, address) \
do { \
pgtable_page_dtor(pte); \
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index a9c9a94c096f..de58e976b9bc 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -208,28 +208,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long) pgd);
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- pte_t *pte;
-
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- return pte;
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *pte;
-
- pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!pte)
- return NULL;
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
- return pte;
-}
-
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index ec64834b1c6a..3f0903bd98e9 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -14,6 +14,10 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
#define check_pgt_cache() do { } while (0)
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
@@ -25,17 +29,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
-
/*
* Allocate one PTE table.
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm)
{
- pte_t *pte;
+ pte_t *pte = __pte_alloc_one_kernel(mm);
- pte = (pte_t *)__get_free_page(PGALLOC_GFP);
if (pte)
clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
@@ -47,35 +48,14 @@ pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
- pte = alloc_pages(PGALLOC_GFP, 0);
+ pte = __pte_alloc_one(mm, GFP_PGTABLE_USER);
if (!pte)
return NULL;
- if (!PageHighMem(pte)) {
- void *page = page_address(pte);
- clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
- }
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- }
-
+ if (!PageHighMem(pte))
+ clean_pte_table(page_address(pte));
return pte;
}
-/*
- * Free one PTE table.
- */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- if (pte)
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
set_pmd(pmdp, __pmd(pmdval));
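
The um, unicore32 and x86 hunks all switch to the pte allocation helpers that now live in <asm-generic/pgalloc.h>, keeping only architecture-specific cache maintenance local. The shared helpers are roughly as below, with GFP_PGTABLE_USER assumed to add __GFP_ACCOUNT on top of GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO):

	/* Hedged sketch of the asm-generic helpers, not verbatim. */
	static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
	{
		return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
	}

	static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
	{
		struct page *pte;

		pte = alloc_page(gfp);
		if (!pte)
			return NULL;
		if (!pgtable_page_ctor(pte)) {
			__free_page(pte);
			return NULL;
		}
		return pte;
	}
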
diff --git a/arch/unicore32/include/mach/regs-gpio.h b/arch/unicore32/include/mach/regs-gpio.h
index 806350e1ccb6..5fc701ee33e3 100644
--- a/arch/unicore32/include/mach/regs-gpio.h
+++ b/arch/unicore32/include/mach/regs-gpio.h
@@ -32,7 +32,7 @@
*/
#define GPIO_GEDR (PKUNITY_GPIO_BASE + 0x0018)
/*
- * Sepcial Voltage Detect Reg GPIO_GPIR.
+ * Special Voltage Detect Reg GPIO_GPIR.
*/
#define GPIO_GPIR (PKUNITY_GPIO_BASE + 0x0020)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dce10b18f4bc..9df2d1cb7a9e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -123,6 +123,7 @@ config X86
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
+ select GUP_GET_PTE_LOW_HIGH if X86_PAE
select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
@@ -158,6 +159,7 @@ config X86
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
select HAVE_EXIT_THREAD
+ select HAVE_FAST_GUP
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
@@ -2906,9 +2908,6 @@ config HAVE_ATOMIC_IOMAP
config X86_DEV_DMA_OPS
bool
-config HAVE_GENERIC_GUP
- def_bool y
-
source "drivers/firmware/Kconfig"
source "arch/x86/kvm/Kconfig"
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 629d1ee05599..1cee10091b9f 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -358,7 +358,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
/* Create the ucontext. */
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ if (static_cpu_has(X86_FEATURE_XSAVE))
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 8e790ec219a5..ba15d53c1ca7 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -49,23 +49,8 @@
#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
static __always_inline void
-set_bit(long nr, volatile unsigned long *addr)
+arch_set_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -78,32 +63,14 @@ set_bit(long nr, volatile unsigned long *addr)
}
}
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___set_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
static __always_inline void
-clear_bit(long nr, volatile unsigned long *addr)
+arch_clear_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -115,26 +82,21 @@ clear_bit(long nr, volatile unsigned long *addr)
}
}
-/*
- * clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and implies release semantics before the memory
- * operation. It can be used for an unlock.
- */
-static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
- clear_bit(nr, addr);
+ arch_clear_bit(nr, addr);
}
-static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
-static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1"
@@ -143,48 +105,23 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
: "ir" ((char) ~(1 << nr)) : "memory");
return negative;
}
+#define arch_clear_bit_unlock_is_negative_byte \
+ arch_clear_bit_unlock_is_negative_byte
-// Let everybody know we have it
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-
-/*
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * __clear_bit() is non-atomic and implies release semantics before the memory
- * operation. It can be used for an unlock if no other CPUs can concurrently
- * modify other bits in the word.
- */
-static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- __clear_bit(nr, addr);
+ arch___clear_bit(nr, addr);
}
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch___change_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+arch_change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -196,42 +133,20 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
}
}
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
-/**
- * test_and_set_bit_lock - Set a bit and return its old value for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is the same as test_and_set_bit on x86.
- */
static __always_inline bool
-test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- return test_and_set_bit(nr, addr);
+ return arch_test_and_set_bit(nr, addr);
}
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_set_bit(long nr, volatile unsigned long *addr)
{
bool oldbit;
@@ -242,28 +157,13 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
return oldbit;
}
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- *
+/*
* Note: the operation is performed atomically with respect to
* the local CPU, but not other CPUs. Portable code should not
* rely on this behaviour.
@@ -271,7 +171,8 @@ static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
-static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
{
bool oldbit;
@@ -282,8 +183,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
return oldbit;
}
-/* WARNING: non atomic and it can be reordered! */
-static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch___test_and_change_bit(long nr, volatile unsigned long *addr)
{
bool oldbit;
@@ -295,15 +196,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
return oldbit;
}
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
@@ -326,16 +220,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
return oldbit;
}
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static bool test_bit(int nr, const volatile unsigned long *addr);
-#endif
-
-#define test_bit(nr, addr) \
+#define arch_test_bit(nr, addr) \
(__builtin_constant_p((nr)) \
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
@@ -504,6 +389,8 @@ static __always_inline int fls64(__u64 x)
#include <asm-generic/bitops/const_hweight.h>
+#include <asm-generic/bitops-instrumented.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
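
Renaming the x86 primitives to arch_*() and pulling in <asm-generic/bitops-instrumented.h> restores the familiar un-prefixed names as thin wrappers that add KASAN instrumentation before dispatching to the architecture implementation (the removed kernel-doc is expected to accompany those generic wrappers). The wrapper shape is approximately:

	/* Hedged sketch of the instrumented wrappers, not verbatim. */
	static inline void set_bit(long nr, volatile unsigned long *addr)
	{
		kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
		arch_set_bit(nr, addr);
	}

	static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
	{
		kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
		return arch_test_and_set_bit(nr, addr);
	}
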
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index a281e61ec60c..29aa7859bdee 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -6,6 +6,9 @@
#include <linux/mm.h> /* for struct page */
#include <linux/pagemap.h>
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
+
static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
#ifdef CONFIG_PARAVIRT_XXL
@@ -47,24 +50,8 @@ extern gfp_t __userpte_alloc_gfp;
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
extern pgtable_t pte_alloc_one(struct mm_struct *);
-/* Should really implement gc for free page table pages. This could be
- done with a reference count in struct page. */
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
- pgtable_page_dtor(pte);
- __free_page(pte);
-}
-
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index f8b1ad2c3828..e3633795fb22 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -285,53 +285,6 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
__pteval_swp_offset(pte)))
-#define gup_get_pte gup_get_pte
-/*
- * WARNING: only to be used in the get_user_pages_fast() implementation.
- *
- * With get_user_pages_fast(), we walk down the pagetables without taking
- * any locks. For this we would like to load the pointers atomically,
- * but that is not possible (without expensive cmpxchg8b) on PAE. What
- * we do have is the guarantee that a PTE will only either go from not
- * present to present, or present to not present or both -- it will not
- * switch to a completely different present page without a TLB flush in
- * between; something that we are blocking by holding interrupts off.
- *
- * Setting ptes from not present to present goes:
- *
- * ptep->pte_high = h;
- * smp_wmb();
- * ptep->pte_low = l;
- *
- * And present to not present goes:
- *
- * ptep->pte_low = 0;
- * smp_wmb();
- * ptep->pte_high = 0;
- *
- * We must ensure here that the load of pte_low sees 'l' iff pte_high
- * sees 'h'. We load pte_high *after* loading pte_low, which ensures we
- * don't see an older value of pte_high. *Then* we recheck pte_low,
- * which ensures that we haven't picked up a changed pte high. We might
- * have gotten rubbish values from pte_low and pte_high, but we are
- * guaranteed that pte_low will not have the present bit set *unless*
- * it is 'l'. Because get_user_pages_fast() only operates on present ptes
- * we're safe.
- */
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
- pte_t pte;
-
- do {
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- } while (unlikely(pte.pte_low != ptep->pte_low));
-
- return pte;
-}
-
#include <asm/pgtable-invert.h>
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 0bb566315621..4990d26dfc73 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -259,14 +259,8 @@ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#define gup_fast_permitted gup_fast_permitted
-static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
- unsigned long len, end;
-
- len = (unsigned long)nr_pages << PAGE_SHIFT;
- end = start + len;
- if (end < start)
- return false;
if (end >> __VIRTUAL_MASK_SHIFT)
return false;
return true;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 7cf508f78c8c..8eb7193e158d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -391,7 +391,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
put_user_ex(&frame->uc, &frame->puc);
/* Create the ucontext. */
- if (boot_cpu_has(X86_FEATURE_XSAVE))
+ if (static_cpu_has(X86_FEATURE_XSAVE))
put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else
put_user_ex(0, &frame->uc.uc_flags);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 98f6e4f88b04..4a9c63d1c20a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -650,7 +650,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
/*
* The idea using the light way get the spte on x86_32 guest is from
- * gup_get_pte(arch/x86/mm/gup.c).
+ * gup_get_pte (mm/gup.c).
*
* An spte tlb flush may be pending, because kvm_set_pte_rmapp
* coalesces them and we are running out of the MMU lock. Therefore
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 1f67b1e15bf6..44816ff6411f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -13,33 +13,17 @@ phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif
-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
-
#ifdef CONFIG_HIGHPTE
-#define PGALLOC_USER_GFP __GFP_HIGHMEM
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
-#define PGALLOC_USER_GFP 0
+#define PGTABLE_HIGHMEM 0
#endif
-gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
-
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
-}
+gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- struct page *pte;
-
- pte = alloc_pages(__userpte_alloc_gfp, 0);
- if (!pte)
- return NULL;
- if (!pgtable_page_ctor(pte)) {
- __free_page(pte);
- return NULL;
- }
- return pte;
+ return __pte_alloc_one(mm, __userpte_alloc_gfp);
}
static int __init setup_userpte(char *arg)
@@ -235,7 +219,7 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
int i;
bool failed = false;
- gfp_t gfp = PGALLOC_GFP;
+ gfp_t gfp = GFP_PGTABLE_USER;
if (mm == &init_mm)
gfp &= ~__GFP_ACCOUNT;
@@ -399,14 +383,14 @@ static inline pgd_t *_pgd_alloc(void)
* We allocate one page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+ return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
PGD_ALLOCATION_ORDER);
/*
* Now PAE kernel is not running as a Xen domain. We can allocate
* a 32-byte slab for pgd to save memory space.
*/
- return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+ return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}
static inline void _pgd_free(pgd_t *pgd)
@@ -424,7 +408,8 @@ void __init pgd_cache_init(void)
static inline pgd_t *_pgd_alloc(void)
{
- return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
+ return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
+ PGD_ALLOCATION_ORDER);
}
static inline void _pgd_free(pgd_t *pgd)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index beb44e22afdf..f6e5eeecfc69 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2700,8 +2700,7 @@ struct remap_data {
struct mmu_update *mmu_update;
};
-static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
+static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
struct remap_data *rmd = data;
pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
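
This and the following i915 and Xen hunks adapt to a simplified apply_to_page_range() callback: the unused pgtable_t token argument is dropped from pte_fn_t, so the callback type becomes, roughly:

	/* Paraphrased from <linux/mm.h>; the token parameter is gone. */
	typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
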
diff --git a/drivers/clk/renesas/r8a77470-cpg-mssr.c b/drivers/clk/renesas/r8a77470-cpg-mssr.c
index ab0fb10b6bf0..d81ae65f0d18 100644
--- a/drivers/clk/renesas/r8a77470-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77470-cpg-mssr.c
@@ -175,7 +175,7 @@ static const unsigned int r8a77470_crit_mod_clks[] __initconst = {
*---------------------------------------------------
* 0 0 20 x80 x78 x50
* 0 1 26 x60 x60 x56
- * 1 0 Prohibitted setting
+ * 1 0 Prohibited setting
* 1 1 30 x52 x52 x50
*
* *1 : Table 7.4 indicates VCO output (PLL0 = VCO)
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index e4935dd1fd37..c23bb29e6d3e 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -35,8 +35,7 @@ struct remap_pfn {
pgprot_t prot;
};
-static int remap_pfn(pte_t *pte, pgtable_t token,
- unsigned long addr, void *data)
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 829b0c6944d8..61758201d9b2 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -127,7 +127,7 @@ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
res = (void *)pbundle->internal_buffer + pbundle->internal_used;
pbundle->internal_used =
ALIGN(new_used, sizeof(*pbundle->internal_buffer));
- if (flags & __GFP_ZERO)
+ if (want_init_on_alloc(flags))
memset(res, 0, size);
return res;
}
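
_uverbs_alloc() carves sub-allocations out of an already-allocated buffer, so the page allocator's automatic zeroing never applies to them; the helper tells it when callers would nevertheless expect zeroed memory, either because they passed __GFP_ZERO or because global init-on-alloc hardening is enabled. A sketch of the helper this relies on (assumed shape, paraphrased from <linux/mm.h>):

	/* Hedged sketch of want_init_on_alloc(), not verbatim. */
	static inline bool want_init_on_alloc(gfp_t flags)
	{
		if (static_branch_unlikely(&init_on_alloc))
			return true;
		return flags & __GFP_ZERO;
	}
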
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 756794ac8fd8..e9767bdcdb44 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -121,6 +121,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_FREE_DOUBLE),
+ CRASHTYPE(SLAB_FREE_CROSS),
+ CRASHTYPE(SLAB_FREE_PAGE),
CRASHTYPE(SOFTLOCKUP),
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
@@ -427,6 +430,7 @@ static int __init lkdtm_module_init(void)
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
lkdtm_usercopy_init();
+ lkdtm_heap_init();
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
@@ -473,6 +477,7 @@ static void __exit lkdtm_module_exit(void)
debugfs_remove_recursive(lkdtm_debugfs_root);
/* Handle test-specific clean-up. */
+ lkdtm_heap_exit();
lkdtm_usercopy_exit();
if (lkdtm_kprobe != NULL)
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 65026d7de130..3c5cec85edce 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -7,6 +7,10 @@
#include <linux/slab.h>
#include <linux/sched.h>
+static struct kmem_cache *double_free_cache;
+static struct kmem_cache *a_cache;
+static struct kmem_cache *b_cache;
+
/*
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
@@ -146,3 +150,71 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void)
kfree(val);
}
+
+void lkdtm_SLAB_FREE_DOUBLE(void)
+{
+ int *val;
+
+ val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate double_free_cache memory.\n");
+ return;
+ }
+
+ /* Just make sure we got real memory. */
+ *val = 0x12345678;
+ pr_info("Attempting double slab free ...\n");
+ kmem_cache_free(double_free_cache, val);
+ kmem_cache_free(double_free_cache, val);
+}
+
+void lkdtm_SLAB_FREE_CROSS(void)
+{
+ int *val;
+
+ val = kmem_cache_alloc(a_cache, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate a_cache memory.\n");
+ return;
+ }
+
+ /* Just make sure we got real memory. */
+ *val = 0x12345679;
+ pr_info("Attempting cross-cache slab free ...\n");
+ kmem_cache_free(b_cache, val);
+}
+
+void lkdtm_SLAB_FREE_PAGE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+
+ pr_info("Attempting non-Slab slab free ...\n");
+ kmem_cache_free(NULL, (void *)p);
+ free_page(p);
+}
+
+/*
+ * We have constructors to keep the caches distinctly separated without
+ * needing to boot with "slab_nomerge".
+ */
+static void ctor_double_free(void *region)
+{ }
+static void ctor_a(void *region)
+{ }
+static void ctor_b(void *region)
+{ }
+
+void __init lkdtm_heap_init(void)
+{
+ double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
+ 64, 0, 0, ctor_double_free);
+ a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
+ b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
+}
+
+void __exit lkdtm_heap_exit(void)
+{
+ kmem_cache_destroy(double_free_cache);
+ kmem_cache_destroy(a_cache);
+ kmem_cache_destroy(b_cache);
+}
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index bbcd370786d4..6a284a87a037 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -29,11 +29,16 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
/* lkdtm_heap.c */
+void __init lkdtm_heap_init(void);
+void __exit lkdtm_heap_exit(void);
void lkdtm_OVERWRITE_ALLOCATION(void);
void lkdtm_WRITE_AFTER_FREE(void);
void lkdtm_READ_AFTER_FREE(void);
void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
void lkdtm_READ_BUDDY_AFTER_FREE(void);
+void lkdtm_SLAB_FREE_DOUBLE(void);
+void lkdtm_SLAB_FREE_CROSS(void);
+void lkdtm_SLAB_FREE_PAGE(void);
/* lkdtm_perms.c */
void __init lkdtm_perms_init(void);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 81caa3782ec0..3bdda1c98339 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -598,7 +598,7 @@ enum ht_channel_width {
HT_CHANNEL_WIDTH_MAX,
};
-/* Ref: 802.11i sepc D10.0 7.3.2.25.1
+/* Ref: 802.11i spec D10.0 7.3.2.25.1
* Cipher Suites Encryption Algorithms
*/
enum rt_enc_alg {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6d6b14295734..faf43b1d3dbe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2965,7 +2965,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fcp_poll_timer);
break;
case LPFC_PCI_DEV_OC:
- /* Stop any OneConnect device sepcific driver timers */
+ /* Stop any OneConnect device specific driver timers */
lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
break;
default:
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index d26bdfd11266..25c75b977666 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -45,7 +45,7 @@ static u32 phy_CalculateBitShift(u32 BitMask)
/**
* Function: PHY_QueryBBReg
*
-* OverView: Read "sepcific bits" from BB register
+* OverView: Read "specific bits" from BB register
*
* Input:
* struct adapter * Adapter,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 469dfbd6cf90..4c339c7e66e5 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -264,8 +264,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
/* ------------------------------------------------------------------ */
-static int find_grant_ptes(pte_t *pte, pgtable_t token,
- unsigned long addr, void *data)
+static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
struct gntdev_grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
@@ -292,8 +291,7 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
}
#ifdef CONFIG_X86
-static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
- unsigned long addr, void *data)
+static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
{
set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
return 0;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 1ff38d8036e9..2f5ce7230a43 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -731,8 +731,7 @@ struct remap_pfn {
unsigned long i;
};
-static int remap_pfn_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
struct page *page = r->pages[r->i];
@@ -966,8 +965,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
* on a per pfn/pte basis. Mapping calls that fail with ENOENT
* can be then retried until success.
*/
-static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
- unsigned long addr, void *data)
+static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
return pte_none(*pte) ? 0 : -EBUSY;
}
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index e7df65d32c91..ba883a80b3c0 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -93,8 +93,7 @@ static void setup_hparams(unsigned long gfn, void *data)
info->fgfn++;
}
-static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
+static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
struct remap_data *info = data;
struct page *page = info->pages[info->index++];
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index bc57ae9e2963..cce9ace651a2 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -35,8 +35,9 @@
* @page: structure to page
*
*/
-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
+static int v9fs_fid_readpage(void *data, struct page *page)
{
+ struct p9_fid *fid = data;
struct inode *inode = page->mapping->host;
struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
struct iov_iter to;
@@ -107,7 +108,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
return ret;
- ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
+ ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
+ filp->private_data);
p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
return ret;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 7d8654a1472e..f8fb89b10227 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -109,9 +109,9 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
return ret;
}
-int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
+int jffs2_do_readpage_unlock(void *data, struct page *pg)
{
- int ret = jffs2_do_readpage_nolock(inode, pg);
+ int ret = jffs2_do_readpage_nolock(data, pg);
unlock_page(pg);
return ret;
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 112d85849db1..8a20ddd25f2d 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -687,7 +687,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
struct page *pg;
pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT,
- (void *)jffs2_do_readpage_unlock, inode);
+ jffs2_do_readpage_unlock, inode);
if (IS_ERR(pg))
return (void *)pg;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index a2dbbb3f4c74..bd3d5f0ddc34 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -155,7 +155,7 @@ extern const struct file_operations jffs2_file_operations;
extern const struct inode_operations jffs2_file_inode_operations;
extern const struct address_space_operations jffs2_file_address_operations;
int jffs2_fsync(struct file *, loff_t, loff_t, int);
-int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
+int jffs2_do_readpage_unlock(void *data, struct page *pg);
/* ioctl.c */
long jffs2_ioctl(struct file *, unsigned int, unsigned long);
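
The 9p and jffs2 changes above exist because read_cache_page() and read_cache_pages() now hand the filler an opaque void *data instead of a struct file *, removing the need for the function-pointer casts dropped here. The expected prototypes are approximately:

	/* Paraphrased from <linux/pagemap.h>, not verbatim. */
	int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			     int (*filler)(void *, struct page *), void *data);
	struct page *read_cache_page(struct address_space *mapping, pgoff_t index,
				     int (*filler)(void *, struct page *), void *data);
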
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index b428c295d13f..5778d1347b35 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -288,10 +288,13 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
/*
* For queues with unlimited length lost events are not expected and
* can possibly have security implications. Avoid losing events when
- * memory is short.
+ * memory is short. For the limited size queues, avoid OOM killer in the
+ * target monitoring memcg as it may have security repercussion.
*/
if (group->max_events == UINT_MAX)
gfp |= __GFP_NOFAIL;
+ else
+ gfp |= __GFP_RETRY_MAYFAIL;
/* Whoever is interested in the event, pays for the allocation. */
memalloc_use_memcg(group->memcg);
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 2fda08b2b885..d510223d302c 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -90,9 +90,13 @@ int inotify_handle_event(struct fsnotify_group *group,
i_mark = container_of(inode_mark, struct inotify_inode_mark,
fsn_mark);
- /* Whoever is interested in the event, pays for the allocation. */
+ /*
+ * Whoever is interested in the event, pays for the allocation. Do not
+ * trigger OOM killer in the target monitoring memcg as it may have
+ * security repercussion.
+ */
memalloc_use_memcg(group->memcg);
- event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
+ event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
memalloc_unuse_memcg();
if (unlikely(!event)) {
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d1348fc4ca6d..0c335b51043d 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6191,17 +6191,17 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
if (le16_to_cpu(tl->tl_used)) {
trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used));
- *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
+ /*
+ * Assuming the write-out below goes well, this copy will be
+ * passed back to recovery for processing.
+ */
+ *tl_copy = kmemdup(tl_bh->b_data, tl_bh->b_size, GFP_KERNEL);
if (!(*tl_copy)) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
- /* Assuming the write-out below goes well, this copy
- * will be passed back to recovery for processing. */
- memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size);
-
/* All we need to do to clear the truncate log is set
* tl_used. */
tl->tl_used = 0;
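Note: kmemdup() is the kmalloc()+memcpy() pair folded into one call, so the error handling stays the same. Roughly (a sketch of the equivalence, not the kernel's implementation):

	/* before */
	copy = kmalloc(len, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, src, len);

	/* after: identical semantics in one call */
	copy = kmemdup(src, len, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;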
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index 005b813a56b6..429e6a8359a5 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -242,57 +242,29 @@ static struct dentry *blockcheck_debugfs_create(const char *name,
static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
{
if (stats) {
- debugfs_remove(stats->b_debug_check);
- stats->b_debug_check = NULL;
- debugfs_remove(stats->b_debug_failure);
- stats->b_debug_failure = NULL;
- debugfs_remove(stats->b_debug_recover);
- stats->b_debug_recover = NULL;
- debugfs_remove(stats->b_debug_dir);
+ debugfs_remove_recursive(stats->b_debug_dir);
stats->b_debug_dir = NULL;
}
}
-static int ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
- struct dentry *parent)
+static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
+ struct dentry *parent)
{
- int rc = -EINVAL;
-
- if (!stats)
- goto out;
-
stats->b_debug_dir = debugfs_create_dir("blockcheck", parent);
- if (!stats->b_debug_dir)
- goto out;
- stats->b_debug_check =
- blockcheck_debugfs_create("blocks_checked",
- stats->b_debug_dir,
- &stats->b_check_count);
+ blockcheck_debugfs_create("blocks_checked", stats->b_debug_dir,
+ &stats->b_check_count);
- stats->b_debug_failure =
- blockcheck_debugfs_create("checksums_failed",
- stats->b_debug_dir,
- &stats->b_failure_count);
+ blockcheck_debugfs_create("checksums_failed", stats->b_debug_dir,
+ &stats->b_failure_count);
- stats->b_debug_recover =
- blockcheck_debugfs_create("ecc_recoveries",
- stats->b_debug_dir,
- &stats->b_recover_count);
- if (stats->b_debug_check && stats->b_debug_failure &&
- stats->b_debug_recover)
- rc = 0;
-
-out:
- if (rc)
- ocfs2_blockcheck_debug_remove(stats);
- return rc;
+ blockcheck_debugfs_create("ecc_recoveries", stats->b_debug_dir,
+ &stats->b_recover_count);
}
#else
-static inline int ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
- struct dentry *parent)
+static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats,
+ struct dentry *parent)
{
- return 0;
}
static inline void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats)
@@ -301,10 +273,10 @@ static inline void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *
#endif /* CONFIG_DEBUG_FS */
/* Always-called wrappers for starting and stopping the debugfs files */
-int ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
- struct dentry *parent)
+void ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
+ struct dentry *parent)
{
- return ocfs2_blockcheck_debug_install(stats, parent);
+ ocfs2_blockcheck_debug_install(stats, parent);
}
void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats)
diff --git a/fs/ocfs2/blockcheck.h b/fs/ocfs2/blockcheck.h
index f2d2689407fa..8f17d2c85f40 100644
--- a/fs/ocfs2/blockcheck.h
+++ b/fs/ocfs2/blockcheck.h
@@ -25,9 +25,6 @@ struct ocfs2_blockcheck_stats {
* ocfs2_blockcheck_stats_debugfs_install()
*/
struct dentry *b_debug_dir; /* Parent of the debugfs files */
- struct dentry *b_debug_check; /* Exposes b_check_count */
- struct dentry *b_debug_failure; /* Exposes b_failure_count */
- struct dentry *b_debug_recover; /* Exposes b_recover_count */
};
@@ -56,8 +53,8 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
struct ocfs2_blockcheck_stats *stats);
/* Debug Initialization */
-int ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
- struct dentry *parent);
+void ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats,
+ struct dentry *parent);
void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats);
/*
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 7a3a096856a8..f1b613327ac8 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -92,10 +92,6 @@ static struct o2hb_debug_buf *o2hb_db_failedregions;
#define O2HB_DEBUG_REGION_PINNED "pinned"
static struct dentry *o2hb_debug_dir;
-static struct dentry *o2hb_debug_livenodes;
-static struct dentry *o2hb_debug_liveregions;
-static struct dentry *o2hb_debug_quorumregions;
-static struct dentry *o2hb_debug_failedregions;
static LIST_HEAD(o2hb_all_regions);
@@ -1184,7 +1180,7 @@ bail:
if (atomic_read(&reg->hr_steady_iterations) != 0) {
if (atomic_dec_and_test(&reg->hr_unsteady_iterations)) {
printk(KERN_NOTICE "o2hb: Unable to stabilize "
- "heartbeart on region %s (%s)\n",
+ "heartbeat on region %s (%s)\n",
config_item_name(&reg->hr_item),
reg->hr_dev_name);
atomic_set(&reg->hr_steady_iterations, 0);
@@ -1391,11 +1387,7 @@ static const struct file_operations o2hb_debug_fops = {
void o2hb_exit(void)
{
- debugfs_remove(o2hb_debug_failedregions);
- debugfs_remove(o2hb_debug_quorumregions);
- debugfs_remove(o2hb_debug_liveregions);
- debugfs_remove(o2hb_debug_livenodes);
- debugfs_remove(o2hb_debug_dir);
+ debugfs_remove_recursive(o2hb_debug_dir);
kfree(o2hb_db_livenodes);
kfree(o2hb_db_liveregions);
kfree(o2hb_db_quorumregions);
@@ -1419,79 +1411,37 @@ static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
&o2hb_debug_fops);
}
-static int o2hb_debug_init(void)
+static void o2hb_debug_init(void)
{
- int ret = -ENOMEM;
-
o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);
- if (!o2hb_debug_dir) {
- mlog_errno(ret);
- goto bail;
- }
- o2hb_debug_livenodes = o2hb_debug_create(O2HB_DEBUG_LIVENODES,
- o2hb_debug_dir,
- &o2hb_db_livenodes,
- sizeof(*o2hb_db_livenodes),
- O2HB_DB_TYPE_LIVENODES,
- sizeof(o2hb_live_node_bitmap),
- O2NM_MAX_NODES,
- o2hb_live_node_bitmap);
- if (!o2hb_debug_livenodes) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_LIVENODES, o2hb_debug_dir,
+ &o2hb_db_livenodes, sizeof(*o2hb_db_livenodes),
+ O2HB_DB_TYPE_LIVENODES, sizeof(o2hb_live_node_bitmap),
+ O2NM_MAX_NODES, o2hb_live_node_bitmap);
- o2hb_debug_liveregions = o2hb_debug_create(O2HB_DEBUG_LIVEREGIONS,
- o2hb_debug_dir,
- &o2hb_db_liveregions,
- sizeof(*o2hb_db_liveregions),
- O2HB_DB_TYPE_LIVEREGIONS,
- sizeof(o2hb_live_region_bitmap),
- O2NM_MAX_REGIONS,
- o2hb_live_region_bitmap);
- if (!o2hb_debug_liveregions) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_LIVEREGIONS, o2hb_debug_dir,
+ &o2hb_db_liveregions, sizeof(*o2hb_db_liveregions),
+ O2HB_DB_TYPE_LIVEREGIONS,
+ sizeof(o2hb_live_region_bitmap), O2NM_MAX_REGIONS,
+ o2hb_live_region_bitmap);
- o2hb_debug_quorumregions =
- o2hb_debug_create(O2HB_DEBUG_QUORUMREGIONS,
- o2hb_debug_dir,
- &o2hb_db_quorumregions,
- sizeof(*o2hb_db_quorumregions),
- O2HB_DB_TYPE_QUORUMREGIONS,
- sizeof(o2hb_quorum_region_bitmap),
- O2NM_MAX_REGIONS,
- o2hb_quorum_region_bitmap);
- if (!o2hb_debug_quorumregions) {
- mlog_errno(ret);
- goto bail;
- }
-
- o2hb_debug_failedregions =
- o2hb_debug_create(O2HB_DEBUG_FAILEDREGIONS,
- o2hb_debug_dir,
- &o2hb_db_failedregions,
- sizeof(*o2hb_db_failedregions),
- O2HB_DB_TYPE_FAILEDREGIONS,
- sizeof(o2hb_failed_region_bitmap),
- O2NM_MAX_REGIONS,
- o2hb_failed_region_bitmap);
- if (!o2hb_debug_failedregions) {
- mlog_errno(ret);
- goto bail;
- }
+ o2hb_debug_create(O2HB_DEBUG_QUORUMREGIONS, o2hb_debug_dir,
+ &o2hb_db_quorumregions,
+ sizeof(*o2hb_db_quorumregions),
+ O2HB_DB_TYPE_QUORUMREGIONS,
+ sizeof(o2hb_quorum_region_bitmap), O2NM_MAX_REGIONS,
+ o2hb_quorum_region_bitmap);
- ret = 0;
-bail:
- if (ret)
- o2hb_exit();
-
- return ret;
+ o2hb_debug_create(O2HB_DEBUG_FAILEDREGIONS, o2hb_debug_dir,
+ &o2hb_db_failedregions,
+ sizeof(*o2hb_db_failedregions),
+ O2HB_DB_TYPE_FAILEDREGIONS,
+ sizeof(o2hb_failed_region_bitmap), O2NM_MAX_REGIONS,
+ o2hb_failed_region_bitmap);
}
-int o2hb_init(void)
+void o2hb_init(void)
{
int i;
@@ -1511,7 +1461,7 @@ int o2hb_init(void)
o2hb_dependent_users = 0;
- return o2hb_debug_init();
+ o2hb_debug_init();
}
/* if we're already in a callback then we're already serialized by the sem */
diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h
index 7f37540ac4ab..beed31ea86cf 100644
--- a/fs/ocfs2/cluster/heartbeat.h
+++ b/fs/ocfs2/cluster/heartbeat.h
@@ -63,7 +63,7 @@ void o2hb_unregister_callback(const char *region_uuid,
void o2hb_fill_node_map(unsigned long *map,
unsigned bytes);
void o2hb_exit(void);
-int o2hb_init(void);
+void o2hb_init(void);
int o2hb_check_node_heartbeating_no_sem(u8 node_num);
int o2hb_check_node_heartbeating_from_callback(u8 node_num);
void o2hb_stop_all_regions(void);
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 0784575f4c2a..02bf4a1774cc 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -38,10 +38,6 @@
#define SHOW_SOCK_STATS 1
static struct dentry *o2net_dentry;
-static struct dentry *sc_dentry;
-static struct dentry *nst_dentry;
-static struct dentry *stats_dentry;
-static struct dentry *nodes_dentry;
static DEFINE_SPINLOCK(o2net_debug_lock);
@@ -490,36 +486,23 @@ static const struct file_operations nodes_fops = {
void o2net_debugfs_exit(void)
{
- debugfs_remove(nodes_dentry);
- debugfs_remove(stats_dentry);
- debugfs_remove(sc_dentry);
- debugfs_remove(nst_dentry);
- debugfs_remove(o2net_dentry);
+ debugfs_remove_recursive(o2net_dentry);
}
-int o2net_debugfs_init(void)
+void o2net_debugfs_init(void)
{
umode_t mode = S_IFREG|S_IRUSR;
o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
- if (o2net_dentry)
- nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
- o2net_dentry, NULL, &nst_seq_fops);
- if (nst_dentry)
- sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
- o2net_dentry, NULL, &sc_seq_fops);
- if (sc_dentry)
- stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
- o2net_dentry, NULL, &stats_seq_fops);
- if (stats_dentry)
- nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
- o2net_dentry, NULL, &nodes_fops);
- if (nodes_dentry)
- return 0;
-
- o2net_debugfs_exit();
- mlog_errno(-ENOMEM);
- return -ENOMEM;
+
+ debugfs_create_file(NST_DEBUG_NAME, mode, o2net_dentry, NULL,
+ &nst_seq_fops);
+ debugfs_create_file(SC_DEBUG_NAME, mode, o2net_dentry, NULL,
+ &sc_seq_fops);
+ debugfs_create_file(STATS_DEBUG_NAME, mode, o2net_dentry, NULL,
+ &stats_seq_fops);
+ debugfs_create_file(NODES_DEBUG_NAME, mode, o2net_dentry, NULL,
+ &nodes_fops);
}
#endif /* CONFIG_DEBUG_FS */
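Note: the common thread in these ocfs2 cluster debugfs conversions (and the dlm/dlmglue/super ones below) is that callers no longer check the return value of debugfs_create_dir()/debugfs_create_file(); the debugfs helpers cope with failure internally, and teardown collapses into a single debugfs_remove_recursive() on the directory. A sketch of the resulting pattern (names are illustrative):

static struct dentry *foo_debug_dir;
static u32 foo_filter;
static const struct file_operations foo_stats_fops;	/* defined elsewhere */

static void foo_debugfs_init(void)
{
	foo_debug_dir = debugfs_create_dir("foo", NULL);

	/* No error checking: the code must not behave differently on failure. */
	debugfs_create_file("stats", 0400, foo_debug_dir, NULL, &foo_stats_fops);
	debugfs_create_u32("filter", 0600, foo_debug_dir, &foo_filter);
}

static void foo_debugfs_exit(void)
{
	debugfs_remove_recursive(foo_debug_dir);
}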
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 2234f7fd1f7c..7a7640c59f3c 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -828,9 +828,7 @@ static int __init init_o2nm(void)
{
int ret = -1;
- ret = o2hb_init();
- if (ret)
- goto out;
+ o2hb_init();
ret = o2net_init();
if (ret)
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 3d5d4b2b1356..5c424a099280 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -76,7 +76,7 @@ static void o2quo_fence_self(void)
};
}
-/* Indicate that a timeout occurred on a hearbeat region write. The
+/* Indicate that a timeout occurred on a heartbeat region write. The
* other nodes in the cluster may consider us dead at that time so we
* want to "fence" ourselves so that we don't scribble on the disk
* after they think they've recovered us. This can't solve all
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index c599463d0694..48a3398f0bf5 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1762,7 +1762,7 @@ static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
(msecs_to_jiffies(o2net_reconnect_delay()) + 1);
if (node_num != o2nm_this_node()) {
- /* believe it or not, accept and node hearbeating testing
+ /* believe it or not, accept and node heartbeating testing
* can succeed for this node before we got here.. so
* only use set_nn_state to clear the persistent error
* if that hasn't already happened */
@@ -2129,8 +2129,7 @@ int o2net_init(void)
o2quo_init();
- if (o2net_debugfs_init())
- goto out;
+ o2net_debugfs_init();
o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL);
o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL);
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index dd4242be3f1f..de87cbffd175 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -109,16 +109,15 @@ struct o2net_send_tracking;
struct o2net_sock_container;
#ifdef CONFIG_DEBUG_FS
-int o2net_debugfs_init(void);
+void o2net_debugfs_init(void);
void o2net_debugfs_exit(void);
void o2net_debug_add_nst(struct o2net_send_tracking *nst);
void o2net_debug_del_nst(struct o2net_send_tracking *nst);
void o2net_debug_add_sc(struct o2net_sock_container *sc);
void o2net_debug_del_sc(struct o2net_sock_container *sc);
#else
-static inline int o2net_debugfs_init(void)
+static inline void o2net_debugfs_init(void)
{
- return 0;
}
static inline void o2net_debugfs_exit(void)
{
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index c8af5bc9e980..a4b58ba99927 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -851,7 +851,7 @@ static const struct file_operations debug_state_fops = {
/* end - debug state funcs */
/* files in subroot */
-int dlm_debug_init(struct dlm_ctxt *dlm)
+void dlm_debug_init(struct dlm_ctxt *dlm)
{
struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
@@ -860,10 +860,6 @@ int dlm_debug_init(struct dlm_ctxt *dlm)
S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot,
dlm, &debug_state_fops);
- if (!dc->debug_state_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
/* for dumping lockres */
dc->debug_lockres_dentry =
@@ -871,20 +867,12 @@ int dlm_debug_init(struct dlm_ctxt *dlm)
S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot,
dlm, &debug_lockres_fops);
- if (!dc->debug_lockres_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
/* for dumping mles */
dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE,
S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot,
dlm, &debug_mle_fops);
- if (!dc->debug_mle_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
/* for dumping lockres on the purge list */
dc->debug_purgelist_dentry =
@@ -892,15 +880,6 @@ int dlm_debug_init(struct dlm_ctxt *dlm)
S_IFREG|S_IRUSR,
dlm->dlm_debugfs_subroot,
dlm, &debug_purgelist_fops);
- if (!dc->debug_purgelist_dentry) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
-
- return 0;
-
-bail:
- return -ENOMEM;
}
void dlm_debug_shutdown(struct dlm_ctxt *dlm)
@@ -920,24 +899,16 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm)
/* subroot - domain dir */
int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
- dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
- dlm_debugfs_root);
- if (!dlm->dlm_debugfs_subroot) {
- mlog_errno(-ENOMEM);
- goto bail;
- }
-
dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt),
GFP_KERNEL);
if (!dlm->dlm_debug_ctxt) {
mlog_errno(-ENOMEM);
- goto bail;
+ return -ENOMEM;
}
+ dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
+ dlm_debugfs_root);
return 0;
-bail:
- dlm_destroy_debugfs_subroot(dlm);
- return -ENOMEM;
}
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
@@ -946,14 +917,9 @@ void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
}
/* debugfs root */
-int dlm_create_debugfs_root(void)
+void dlm_create_debugfs_root(void)
{
dlm_debugfs_root = debugfs_create_dir(DLM_DEBUGFS_DIR, NULL);
- if (!dlm_debugfs_root) {
- mlog_errno(-ENOMEM);
- return -ENOMEM;
- }
- return 0;
}
void dlm_destroy_debugfs_root(void)
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 74d019694c7e..7d0c7c9013ce 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -28,20 +28,19 @@ struct debug_lockres {
struct dlm_lock_resource *dl_res;
};
-int dlm_debug_init(struct dlm_ctxt *dlm);
+void dlm_debug_init(struct dlm_ctxt *dlm);
void dlm_debug_shutdown(struct dlm_ctxt *dlm);
int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm);
-int dlm_create_debugfs_root(void);
+void dlm_create_debugfs_root(void);
void dlm_destroy_debugfs_root(void);
#else
-static inline int dlm_debug_init(struct dlm_ctxt *dlm)
+static inline void dlm_debug_init(struct dlm_ctxt *dlm)
{
- return 0;
}
static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm)
{
@@ -53,9 +52,8 @@ static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
}
-static inline int dlm_create_debugfs_root(void)
+static inline void dlm_create_debugfs_root(void)
{
- return 0;
}
static inline void dlm_destroy_debugfs_root(void)
{
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 9021e72e1f98..7338b5d4647c 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1881,11 +1881,7 @@ static int dlm_join_domain(struct dlm_ctxt *dlm)
goto bail;
}
- status = dlm_debug_init(dlm);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ dlm_debug_init(dlm);
snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
@@ -2346,9 +2342,7 @@ static int __init dlm_init(void)
goto error;
}
- status = dlm_create_debugfs_root();
- if (status)
- goto error;
+ dlm_create_debugfs_root();
return 0;
error:
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 810f841494ef..74b768ca1cd8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2161,7 +2161,7 @@ put:
* think that $RECOVERY is currently mastered by a dead node. If so,
* we wait a short time to allow that node to get notified by its own
* heartbeat stack, then check again. All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is
+ * mastered by dead nodes are purged when the heartbeat callback is
* fired, so we can know for sure that it is safe to continue once
* the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index e22d6a115220..064ce5bbc3f6 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1109,7 +1109,7 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
{
u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
int mres_total_locks = be32_to_cpu(mres->total_locks);
- int sz, ret = 0, status = 0;
+ int ret = 0, status = 0;
u8 orig_flags = mres->flags,
orig_master = mres->master;
@@ -1117,9 +1117,6 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
if (!mres->num_locks)
return 0;
- sz = sizeof(struct dlm_migratable_lockres) +
- (mres->num_locks * sizeof(struct dlm_migratable_lock));
-
/* add an all-done flag if we reached the last lock */
orig_flags = mres->flags;
BUG_ON(total_locks > mres_total_locks);
@@ -1133,7 +1130,8 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
/* send it */
ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
- sz, send_to, &status);
+ struct_size(mres, ml, mres->num_locks),
+ send_to, &status);
if (ret < 0) {
/* XXX: negative status is not handled.
* this will end up killing this node. */
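Note: struct_size(mres, ml, mres->num_locks) computes sizeof(*mres) plus num_locks trailing elements of the ml[] flexible array, which is exactly what the removed open-coded sz was, but it saturates instead of overflowing. A self-contained sketch with a hypothetical struct:

struct item { u64 val; };
struct msg {
	u32 count;
	struct item items[];	/* flexible array member */
};

	/* open-coded: multiplication can silently overflow */
	size_t sz  = sizeof(struct msg) + count * sizeof(struct item);

	/* struct_size(): same value, clamps to SIZE_MAX on overflow (m is struct msg *) */
	size_t sz2 = struct_size(m, items, count);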
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index b5fc5d3c7525..14207234fa3d 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -426,6 +426,7 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
res->l_lock_refresh = 0;
+ res->l_lock_wait = 0;
memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}
@@ -460,6 +461,8 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
if (ret)
stats->ls_fail++;
+
+ stats->ls_last = ktime_to_us(ktime_get_real());
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
@@ -467,6 +470,21 @@ static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
lockres->l_lock_refresh++;
}
+static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_mask_waiter *mw;
+
+ if (list_empty(&lockres->l_mask_waiters)) {
+ lockres->l_lock_wait = 0;
+ return;
+ }
+
+ mw = list_first_entry(&lockres->l_mask_waiters,
+ struct ocfs2_mask_waiter, mw_item);
+ lockres->l_lock_wait =
+ ktime_to_us(ktime_mono_to_real(mw->mw_lock_start));
+}
+
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
mw->mw_lock_start = ktime_get();
@@ -482,6 +500,9 @@ static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
+static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
+{
+}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
@@ -875,6 +896,7 @@ static void lockres_set_flags(struct ocfs2_lock_res *lockres,
list_del_init(&mw->mw_item);
mw->mw_status = 0;
complete(&mw->mw_complete);
+ ocfs2_track_lock_wait(lockres);
}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
@@ -1386,6 +1408,7 @@ static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
mw->mw_mask = mask;
mw->mw_goal = goal;
+ ocfs2_track_lock_wait(lockres);
}
/* returns 0 if the mw that was removed was already satisfied, -EBUSY
@@ -1402,6 +1425,7 @@ static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
list_del_init(&mw->mw_item);
init_completion(&mw->mw_complete);
+ ocfs2_track_lock_wait(lockres);
}
return ret;
@@ -2989,6 +3013,8 @@ struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
kref_init(&dlm_debug->d_refcnt);
INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
dlm_debug->d_locking_state = NULL;
+ dlm_debug->d_locking_filter = NULL;
+ dlm_debug->d_filter_secs = 0;
out:
return dlm_debug;
}
@@ -3079,17 +3105,43 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
* - Lock stats printed
* New in version 3
* - Max time in lock stats is in usecs (instead of nsecs)
+ * New in version 4
+ * - Add last pr/ex unlock times and first lock wait time in usecs
*/
-#define OCFS2_DLM_DEBUG_STR_VERSION 3
+#define OCFS2_DLM_DEBUG_STR_VERSION 4
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
char *lvb;
struct ocfs2_lock_res *lockres = v;
+#ifdef CONFIG_OCFS2_FS_STATS
+ u64 now, last;
+ struct ocfs2_dlm_debug *dlm_debug =
+ ((struct ocfs2_dlm_seq_priv *)m->private)->p_dlm_debug;
+#endif
if (!lockres)
return -EINVAL;
+#ifdef CONFIG_OCFS2_FS_STATS
+ if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
+ now = ktime_to_us(ktime_get_real());
+ if (lockres->l_lock_prmode.ls_last >
+ lockres->l_lock_exmode.ls_last)
+ last = lockres->l_lock_prmode.ls_last;
+ else
+ last = lockres->l_lock_exmode.ls_last;
+ /*
+ * Use d_filter_secs field to filter lock resources dump,
+ * the default d_filter_secs(0) value filters nothing,
+ * otherwise, only dump the last N seconds active lock
+ * resources.
+ */
+ if (div_u64(now - last, 1000000) > dlm_debug->d_filter_secs)
+ return 0;
+ }
+#endif
+
seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
@@ -3131,6 +3183,9 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
# define lock_refresh(_l) ((_l)->l_lock_refresh)
+# define lock_last_prmode(_l) ((_l)->l_lock_prmode.ls_last)
+# define lock_last_exmode(_l) ((_l)->l_lock_exmode.ls_last)
+# define lock_wait(_l) ((_l)->l_lock_wait)
#else
# define lock_num_prmode(_l) (0)
# define lock_num_exmode(_l) (0)
@@ -3141,6 +3196,9 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
# define lock_max_prmode(_l) (0)
# define lock_max_exmode(_l) (0)
# define lock_refresh(_l) (0)
+# define lock_last_prmode(_l) (0ULL)
+# define lock_last_exmode(_l) (0ULL)
+# define lock_wait(_l) (0ULL)
#endif
/* The following seq_print was added in version 2 of this output */
seq_printf(m, "%u\t"
@@ -3151,7 +3209,10 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
"%llu\t"
"%u\t"
"%u\t"
- "%u\t",
+ "%u\t"
+ "%llu\t"
+ "%llu\t"
+ "%llu\t",
lock_num_prmode(lockres),
lock_num_exmode(lockres),
lock_num_prmode_failed(lockres),
@@ -3160,7 +3221,10 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
lock_total_exmode(lockres),
lock_max_prmode(lockres),
lock_max_exmode(lockres),
- lock_refresh(lockres));
+ lock_refresh(lockres),
+ lock_last_prmode(lockres),
+ lock_last_exmode(lockres),
+ lock_wait(lockres));
/* End the line */
seq_printf(m, "\n");
@@ -3214,9 +3278,8 @@ static const struct file_operations ocfs2_dlm_debug_fops = {
.llseek = seq_lseek,
};
-static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
+static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
- int ret = 0;
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
dlm_debug->d_locking_state = debugfs_create_file("locking_state",
@@ -3224,16 +3287,11 @@ static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
osb->osb_debug_root,
osb,
&ocfs2_dlm_debug_fops);
- if (!dlm_debug->d_locking_state) {
- ret = -EINVAL;
- mlog(ML_ERROR,
- "Unable to create locking state debugfs file.\n");
- goto out;
- }
- ocfs2_get_dlm_debug(dlm_debug);
-out:
- return ret;
+ dlm_debug->d_locking_filter = debugfs_create_u32("locking_filter",
+ 0600,
+ osb->osb_debug_root,
+ &dlm_debug->d_filter_secs);
}
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
@@ -3242,6 +3300,7 @@ static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
if (dlm_debug) {
debugfs_remove(dlm_debug->d_locking_state);
+ debugfs_remove(dlm_debug->d_locking_filter);
ocfs2_put_dlm_debug(dlm_debug);
}
}
@@ -3256,11 +3315,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
goto local;
}
- status = ocfs2_dlm_init_debug(osb);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_dlm_init_debug(osb);
/* launch downconvert thread */
osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
@@ -4352,7 +4407,6 @@ static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
static int ocfs2_downconvert_thread(void *arg)
{
- int status = 0;
struct ocfs2_super *osb = arg;
/* only quit once we've been asked to stop and there is no more
@@ -4370,7 +4424,7 @@ static int ocfs2_downconvert_thread(void *arg)
}
osb->dc_task = NULL;
- return status;
+ return 0;
}
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
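Note on the new filter: with CONFIG_OCFS2_FS_STATS, the locking_filter debugfs file sets d_filter_secs, and a lock resource with no queued waiter is skipped from locking_state when (now - max(prmode ls_last, exmode ls_last)) / 1000000 exceeds that value. For example, with locking_filter set to 60, a resource last unlocked 300 seconds ago is omitted, while one unlocked 10 seconds ago, or one that still has a mask waiter, is printed; the default value 0 filters nothing.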
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index f03674afbd30..158e5af767fd 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -424,12 +424,11 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
bh = osb->local_alloc_bh;
alloc = (struct ocfs2_dinode *) bh->b_data;
- alloc_copy = kmalloc(bh->b_size, GFP_NOFS);
+ alloc_copy = kmemdup(alloc, bh->b_size, GFP_NOFS);
if (!alloc_copy) {
status = -ENOMEM;
goto out_commit;
}
- memcpy(alloc_copy, alloc, bh->b_size);
status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode),
bh, OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1272,13 +1271,12 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
* local alloc shutdown won't try to double free main bitmap
* bits. Make a copy so the sync function knows which bits to
* free. */
- alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_NOFS);
+ alloc_copy = kmemdup(alloc, osb->local_alloc_bh->b_size, GFP_NOFS);
if (!alloc_copy) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
- memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);
status = ocfs2_journal_access_di(handle,
INODE_CACHE(local_alloc_inode),
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index a4647a646f07..fddbbd60f434 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -150,6 +150,7 @@ struct ocfs2_lock_stats {
/* Storing max wait in usecs saves 24 bytes per inode */
u32 ls_max; /* Max wait in USEC */
+ u64 ls_last; /* Last unlock time in USEC */
};
#endif
@@ -191,6 +192,7 @@ struct ocfs2_lock_res {
#ifdef CONFIG_OCFS2_FS_STATS
struct ocfs2_lock_stats l_lock_prmode; /* PR mode stats */
u32 l_lock_refresh; /* Disk refreshes */
+ u64 l_lock_wait; /* First lock wait time */
struct ocfs2_lock_stats l_lock_exmode; /* EX mode stats */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -222,6 +224,8 @@ struct ocfs2_orphan_scan {
struct ocfs2_dlm_debug {
struct kref d_refcnt;
struct dentry *d_locking_state;
+ struct dentry *d_locking_filter;
+ u32 d_filter_secs;
struct list_head d_lockres_tracking;
};
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index a201f9780b35..8b2f39506648 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1079,33 +1079,15 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
ocfs2_debugfs_root);
- if (!osb->osb_debug_root) {
- status = -EINVAL;
- mlog(ML_ERROR, "Unable to create per-mount debugfs root.\n");
- goto read_super_error;
- }
osb->osb_ctxt = debugfs_create_file("fs_state", S_IFREG|S_IRUSR,
osb->osb_debug_root,
osb,
&ocfs2_osb_debug_fops);
- if (!osb->osb_ctxt) {
- status = -EINVAL;
- mlog_errno(status);
- goto read_super_error;
- }
- if (ocfs2_meta_ecc(osb)) {
- status = ocfs2_blockcheck_stats_debugfs_install(
- &osb->osb_ecc_stats,
- osb->osb_debug_root);
- if (status) {
- mlog(ML_ERROR,
- "Unable to create blockcheck statistics "
- "files\n");
- goto read_super_error;
- }
- }
+ if (ocfs2_meta_ecc(osb))
+ ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
+ osb->osb_debug_root);
status = ocfs2_mount_volume(sb);
if (status < 0)
@@ -1592,11 +1574,6 @@ static int __init ocfs2_init(void)
goto out2;
ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL);
- if (!ocfs2_debugfs_root) {
- status = -ENOMEM;
- mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n");
- goto out3;
- }
ocfs2_set_locking_protocol();
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c40fca98f2b7..77eb628ecc7f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -532,8 +532,7 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
unsigned long totalpages = totalram_pages() + total_swap_pages;
unsigned long points = 0;
- points = oom_badness(task, NULL, NULL, totalpages) *
- 1000 / totalpages;
+ points = oom_badness(task, totalpages) * 1000 / totalpages;
seq_printf(m, "%lu\n", points);
return 0;
@@ -1962,9 +1961,12 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
- down_read(&mm->mmap_sem);
- exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
- up_read(&mm->mmap_sem);
+ status = down_read_killable(&mm->mmap_sem);
+ if (!status) {
+ exact_vma_exists = !!find_exact_vma(mm, vm_start,
+ vm_end);
+ up_read(&mm->mmap_sem);
+ }
}
mmput(mm);
@@ -2010,8 +2012,11 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
+ rc = down_read_killable(&mm->mmap_sem);
+ if (rc)
+ goto out_mmput;
+
rc = -ENOENT;
- down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
*path = vma->vm_file->f_path;
@@ -2107,7 +2112,11 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
if (!mm)
goto out_put_task;
- down_read(&mm->mmap_sem);
+ result = ERR_PTR(-EINTR);
+ if (down_read_killable(&mm->mmap_sem))
+ goto out_put_mm;
+
+ result = ERR_PTR(-ENOENT);
vma = find_exact_vma(mm, vm_start, vm_end);
if (!vma)
goto out_no_vma;
@@ -2118,6 +2127,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
out_no_vma:
up_read(&mm->mmap_sem);
+out_put_mm:
mmput(mm);
out_put_task:
put_task_struct(task);
@@ -2160,7 +2170,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
mm = get_task_mm(task);
if (!mm)
goto out_put_task;
- down_read(&mm->mmap_sem);
+
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret) {
+ mmput(mm);
+ goto out_put_task;
+ }
nr_files = 0;
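Note: all of the fs/proc conversions in this file follow one shape: down_read(&mm->mmap_sem) becomes down_read_killable(), so a fatal signal returns -EINTR instead of leaving the task blocked behind a writer. The skeleton (label name is illustrative):

	if (down_read_killable(&mm->mmap_sem)) {
		ret = -EINTR;		/* interrupted by a fatal signal */
		goto out_put_mm;	/* mmput() still required on this path */
	}
	/* ... walk VMAs under mmap_sem ... */
	up_read(&mm->mmap_sem);
out_put_mm:
	mmput(mm);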
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 568d90e17c17..465ea0153b2a 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -120,7 +120,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Committed_AS: ", committed);
seq_printf(m, "VmallocTotal: %8lu kB\n",
(unsigned long)VMALLOC_TOTAL >> 10);
- show_val_kb(m, "VmallocUsed: ", 0ul);
+ show_val_kb(m, "VmallocUsed: ", vmalloc_nr_pages());
show_val_kb(m, "VmallocChunk: ", 0ul);
show_val_kb(m, "Percpu: ", pcpu_nr_pages());
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 01d4eb0e6bd1..dedca3da428a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -166,7 +166,11 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem)) {
+ mmput(mm);
+ return ERR_PTR(-EINTR);
+ }
+
hold_task_mempolicy(priv);
priv->tail_vma = get_gate_vma(mm);
@@ -417,17 +421,53 @@ struct mem_size_stats {
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
u64 pss;
+ u64 pss_anon;
+ u64 pss_file;
+ u64 pss_shmem;
u64 pss_locked;
u64 swap_pss;
bool check_shmem_swap;
};
+static void smaps_page_accumulate(struct mem_size_stats *mss,
+ struct page *page, unsigned long size, unsigned long pss,
+ bool dirty, bool locked, bool private)
+{
+ mss->pss += pss;
+
+ if (PageAnon(page))
+ mss->pss_anon += pss;
+ else if (PageSwapBacked(page))
+ mss->pss_shmem += pss;
+ else
+ mss->pss_file += pss;
+
+ if (locked)
+ mss->pss_locked += pss;
+
+ if (dirty || PageDirty(page)) {
+ if (private)
+ mss->private_dirty += size;
+ else
+ mss->shared_dirty += size;
+ } else {
+ if (private)
+ mss->private_clean += size;
+ else
+ mss->shared_clean += size;
+ }
+}
+
static void smaps_account(struct mem_size_stats *mss, struct page *page,
bool compound, bool young, bool dirty, bool locked)
{
int i, nr = compound ? 1 << compound_order(page) : 1;
unsigned long size = nr * PAGE_SIZE;
+ /*
+ * First accumulate quantities that depend only on |size| and the type
+ * of the compound page.
+ */
if (PageAnon(page)) {
mss->anonymous += size;
if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
@@ -440,42 +480,25 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
mss->referenced += size;
/*
+ * Then accumulate quantities that may depend on sharing, or that may
+ * differ page-by-page.
+ *
* page_count(page) == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page mapped with PTE it would elevate
* page_count().
*/
if (page_count(page) == 1) {
- if (dirty || PageDirty(page))
- mss->private_dirty += size;
- else
- mss->private_clean += size;
- mss->pss += (u64)size << PSS_SHIFT;
- if (locked)
- mss->pss_locked += (u64)size << PSS_SHIFT;
+ smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
+ locked, true);
return;
}
-
for (i = 0; i < nr; i++, page++) {
int mapcount = page_mapcount(page);
- unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
-
- if (mapcount >= 2) {
- if (dirty || PageDirty(page))
- mss->shared_dirty += PAGE_SIZE;
- else
- mss->shared_clean += PAGE_SIZE;
- mss->pss += pss / mapcount;
- if (locked)
- mss->pss_locked += pss / mapcount;
- } else {
- if (dirty || PageDirty(page))
- mss->private_dirty += PAGE_SIZE;
- else
- mss->private_clean += PAGE_SIZE;
- mss->pss += pss;
- if (locked)
- mss->pss_locked += pss;
- }
+ unsigned long pss = PAGE_SIZE << PSS_SHIFT;
+ if (mapcount >= 2)
+ pss /= mapcount;
+ smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
+ mapcount < 2);
}
}
@@ -754,10 +777,23 @@ static void smap_gather_stats(struct vm_area_struct *vma,
seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
/* Show the contents common for smaps and smaps_rollup */
-static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss)
+static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
+ bool rollup_mode)
{
SEQ_PUT_DEC("Rss: ", mss->resident);
SEQ_PUT_DEC(" kB\nPss: ", mss->pss >> PSS_SHIFT);
+ if (rollup_mode) {
+ /*
+ * These are meaningful only for smaps_rollup, otherwise two of
+ * them are zero, and the other one is the same as Pss.
+ */
+ SEQ_PUT_DEC(" kB\nPss_Anon: ",
+ mss->pss_anon >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nPss_File: ",
+ mss->pss_file >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nPss_Shmem: ",
+ mss->pss_shmem >> PSS_SHIFT);
+ }
SEQ_PUT_DEC(" kB\nShared_Clean: ", mss->shared_clean);
SEQ_PUT_DEC(" kB\nShared_Dirty: ", mss->shared_dirty);
SEQ_PUT_DEC(" kB\nPrivate_Clean: ", mss->private_clean);
@@ -794,7 +830,7 @@ static int show_smap(struct seq_file *m, void *v)
SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma));
seq_puts(m, " kB\n");
- __show_smap(m, &mss);
+ __show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %d\n", transparent_hugepage_enabled(vma));
@@ -828,7 +864,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
- down_read(&mm->mmap_sem);
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret)
+ goto out_put_mm;
+
hold_task_mempolicy(priv);
for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
@@ -841,12 +880,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
seq_pad(m, ' ');
seq_puts(m, "[rollup]\n");
- __show_smap(m, &mss);
+ __show_smap(m, &mss, true);
release_task_mempolicy(priv);
up_read(&mm->mmap_sem);
- mmput(mm);
+out_put_mm:
+ mmput(mm);
out_put_task:
put_task_struct(priv->task);
priv->task = NULL;
@@ -1132,7 +1172,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
goto out_mm;
}
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem)) {
+ count = -EINTR;
+ goto out_mm;
+ }
tlb_gather_mmu(&tlb, mm, 0, -1);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -1539,7 +1582,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
- down_read(&mm->mmap_sem);
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret)
+ goto out_free;
ret = walk_page_range(start_vaddr, end, &pagemap_walk);
up_read(&mm->mmap_sem);
start_vaddr = end;
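Note: the smaps_page_accumulate() refactor keeps the Pss arithmetic unchanged while splitting it by page type. As a worked example, a 4 KiB file page mapped by four processes adds 4 KiB to each Rss but only 4 KiB / 4 = 1 KiB to each Pss (accumulated into pss_file); a page with page_count() == 1 is charged in full to Pss and to the matching anon/file/shmem bucket. The new Pss_Anon/Pss_File/Pss_Shmem lines are printed only for smaps_rollup, where they sum to Pss.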
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 36bf0f2e102e..7907e6419e57 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -211,7 +211,11 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem)) {
+ mmput(mm);
+ return ERR_PTR(-EINTR);
+ }
+
/* start from the Nth VMA */
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
if (n-- == 0)
diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
new file mode 100644
index 000000000000..ddd1c6d9d8db
--- /dev/null
+++ b/include/asm-generic/bitops-instrumented.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for bit
+ * operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
+
+#include <linux/kasan-checks.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_set_bit(nr, addr);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___set_bit(nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ */
+static inline void clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_clear_bit(nr, addr);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___clear_bit(nr, addr);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_clear_bit_unlock(nr, addr);
+}
+
+/**
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a non-atomic operation but implies a release barrier before the
+ * memory operation. It can be used for an unlock if no other CPUs can
+ * concurrently modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___clear_bit_unlock(nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_change_bit(nr, addr);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___change_bit(nr, addr);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_set_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch___test_and_set_bit(nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_set_bit_lock(nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_clear_bit(nr, addr);
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch___test_and_clear_bit(nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_change_bit(nr, addr);
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch___test_and_change_bit(nr, addr);
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline bool test_bit(long nr, const volatile unsigned long *addr)
+{
+ kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit(nr, addr);
+}
+
+#if defined(arch_clear_bit_unlock_is_negative_byte)
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+static inline bool
+clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+}
+/* Let everybody know we have it. */
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */
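Note: an architecture opts into these instrumented wrappers by renaming its own implementations to the arch_ prefix and then including the generic header, e.g. (a condensed sketch of the x86-style conversion, not the literal arch code):

/* arch/foo/include/asm/bitops.h */
static __always_inline void arch_set_bit(long nr, volatile unsigned long *addr)
{
	/* the arch's former set_bit() body goes here */
}
/* ... arch___set_bit(), arch_test_and_set_bit(), arch_test_bit(), ... */

#include <asm-generic/bitops-instrumented.h>	/* provides the set_bit() etc. wrappers */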
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 948714c1535a..8476175c07e7 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -1,13 +1,112 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H
-/*
- * an empty file is enough for a nommu architecture
- */
+
#ifdef CONFIG_MMU
-#error need to implement an architecture specific asm/pgalloc.h
+
+#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO)
+#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
+
+/**
+ * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+/**
+ * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ return __pte_alloc_one_kernel(mm);
+}
+#endif
+
+/**
+ * pte_free_kernel - free PTE-level kernel page table page
+ * @mm: the mm_struct of the current context
+ * @pte: pointer to the memory containing the page table
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+/**
+ * __pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ * @gfp: GFP flags to use for the allocation
+ *
+ * Allocates a page and runs the pgtable_page_ctor().
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation or must have custom GFP flags.
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
+{
+ struct page *pte;
+
+ pte = alloc_page(gfp);
+ if (!pte)
+ return NULL;
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
+
+ return pte;
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
+/**
+ * pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_page_ctor().
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ return __pte_alloc_one(mm, GFP_PGTABLE_USER);
+}
#endif
+/*
+ * Should really implement gc for free page table pages. This could be
+ * done with a reference count in struct page.
+ */
+
+/**
+ * pte_free - free PTE-level user page table page
+ * @mm: the mm_struct of the current context
+ * @pte_page: the `struct page` representing the page table
+ */
+static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+ pgtable_page_dtor(pte_page);
+ __free_page(pte_page);
+}
+
+#else /* CONFIG_MMU */
+
+/* This is enough for a nommu architecture */
#define check_pgt_cache() do { } while (0)
+#endif /* CONFIG_MMU */
+
#endif /* __ASM_GENERIC_PGALLOC_H */
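Note: architectures that are happy with plain page allocation can now drop their private pte_alloc_one()/pte_free() copies; ones that need custom GFP flags define __HAVE_ARCH_PTE_ALLOC_ONE and wrap __pte_alloc_one() themselves. A sketch of the override path (the flag choice is illustrative):

/* arch/foo/include/asm/pgalloc.h */
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	/* same pgtable_page_ctor() handling as the generic helper, arch GFP flags */
	return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
}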
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 28813c6f44b6..a7cf3599d9a1 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -92,12 +92,14 @@ static inline bool dmar_rcu_check(void)
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
-#define for_each_dev_scope(a, c, p, d) \
- for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
- NULL, (p) < (c)); (p)++)
-
-#define for_each_active_dev_scope(a, c, p, d) \
- for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else
+#define for_each_dev_scope(devs, cnt, i, tmp) \
+ for ((i) = 0; ((tmp) = (i) < (cnt) ? \
+ dmar_rcu_dereference((devs)[(i)].dev) : NULL, (i) < (cnt)); \
+ (i)++)
+
+#define for_each_active_dev_scope(devs, cnt, i, tmp) \
+ for_each_dev_scope((devs), (cnt), (i), (tmp)) \
+ if (!(tmp)) { continue; } else
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edf476c8cfb9..edfca4278319 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -16,29 +16,11 @@ struct user_struct;
struct mmu_gather;
#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
#endif
-
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
@@ -608,22 +590,92 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-#define alloc_huge_page(v, a, r) NULL
-#define alloc_huge_page_node(h, nid) NULL
-#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_vma(h, vma, address) NULL
-#define alloc_bootmem_huge_page(h) NULL
-#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
-#define hstate_vma(v) NULL
-#define hstate_inode(i) NULL
-#define page_hstate(page) NULL
-#define huge_page_size(h) PAGE_SIZE
-#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
-#define huge_page_order(h) 0
-#define huge_page_shift(h) PAGE_SHIFT
+
+static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr,
+ int avoid_reserve)
+{
+ return NULL;
+}
+
+static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
+{
+ return NULL;
+}
+
+static inline struct page *
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
+{
+ return NULL;
+}
+
+static inline struct page *alloc_huge_page_vma(struct hstate *h,
+ struct vm_area_struct *vma,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline int __alloc_bootmem_huge_page(struct hstate *h)
+{
+ return 0;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ return NULL;
+}
+
+static inline struct hstate *page_hstate(struct page *page)
+{
+ return NULL;
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+ return PAGE_MASK;
+}
+
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ return PAGE_SIZE;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+ return 0;
+}
+
+static inline unsigned int huge_page_shift(struct hstate *h)
+{
+ return PAGE_SHIFT;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
return false;
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index a61dc075e2ce..ac6aba632f2d 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -2,14 +2,43 @@
#ifndef _LINUX_KASAN_CHECKS_H
#define _LINUX_KASAN_CHECKS_H
-#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL)
-void kasan_check_read(const volatile void *p, unsigned int size);
-void kasan_check_write(const volatile void *p, unsigned int size);
+#include <linux/types.h>
+
+/*
+ * __kasan_check_*: Always available when KASAN is enabled. This may be used
+ * even in compilation units that selectively disable KASAN, but must use KASAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KASAN
+bool __kasan_check_read(const volatile void *p, unsigned int size);
+bool __kasan_check_write(const volatile void *p, unsigned int size);
+#else
+static inline bool __kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool __kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+#endif
+
+/*
+ * kasan_check_*: Only available when the particular compilation unit has KASAN
+ * instrumentation enabled. May be used in header files.
+ */
+#ifdef __SANITIZE_ADDRESS__
+#define kasan_check_read __kasan_check_read
+#define kasan_check_write __kasan_check_write
#else
-static inline void kasan_check_read(const volatile void *p, unsigned int size)
-{ }
-static inline void kasan_check_write(const volatile void *p, unsigned int size)
-{ }
+static inline bool kasan_check_read(const volatile void *p, unsigned int size)
+{
+ return true;
+}
+static inline bool kasan_check_write(const volatile void *p, unsigned int size)
+{
+ return true;
+}
#endif
#endif
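
For illustration, a minimal sketch of how the two flavours above are meant to be used; the file, its Makefile line and the helper function are assumptions made for the example, not part of this patch:

/*
 * Hypothetical foo.c, compiled with KASAN instrumentation disabled,
 * e.g. via "KASAN_SANITIZE_foo.o := n" in its Makefile.
 */
#include <linux/kasan-checks.h>
#include <linux/string.h>

static void copy_object(void *dst, const void *src, unsigned int len)
{
        /*
         * The compiler emits no checks in this file, so validate the
         * accesses explicitly; both helpers now return whether the
         * access was valid.
         */
        if (!__kasan_check_read(src, len) || !__kasan_check_write(dst, len))
                return;
        memcpy(dst, src, len);
}
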
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b40ea104dd36..cc8a03cc9674 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -76,8 +76,11 @@ void kasan_free_shadow(const struct vm_struct *vm);
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
-size_t ksize(const void *);
-static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t __ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr)
+{
+ kasan_unpoison_shadow(ptr, __ksize(ptr));
+}
size_t kasan_metadata_size(struct kmem_cache *cache);
bool kasan_save_enable_multi_shot(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1dcb763bb610..44c41462be33 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -233,8 +233,9 @@ struct mem_cgroup {
/* OOM-Killer disable */
int oom_kill_disable;
- /* memory.events */
+ /* memory.events and memory.events.local */
struct cgroup_file events_file;
+ struct cgroup_file events_local_file;
/* handle for "memory.swap.events" */
struct cgroup_file swap_events_file;
@@ -281,6 +282,7 @@ struct mem_cgroup {
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+ atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
unsigned long socket_pressure;
@@ -392,7 +394,6 @@ out:
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -747,6 +748,9 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
+ atomic_long_inc(&memcg->memory_events_local[event]);
+ cgroup_file_notify(&memcg->events_local_file);
+
do {
atomic_long_inc(&memcg->memory_events[event]);
cgroup_file_notify(&memcg->events_file);
@@ -870,12 +874,6 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return true;
}
-static inline bool task_in_mem_cgroup(struct task_struct *task,
- const struct mem_cgroup *memcg)
-{
- return true;
-}
-
static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
return NULL;
@@ -1273,6 +1271,8 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
struct mem_cgroup *memcg);
+void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
+ unsigned int nr_pages);
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1314,6 +1314,14 @@ static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
return 0;
}
+
+static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
+ struct mem_cgroup *memcg)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge_memcg(memcg, 1 << order);
+}
+
/*
* helper for accessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd0b5f4e1e45..f88f0eabcc5e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -633,6 +633,11 @@ static inline bool is_vmalloc_addr(const void *x)
return false;
#endif
}
+
+#ifndef is_ioremap_addr
+#define is_ioremap_addr(x) is_vmalloc_addr(x)
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
@@ -2681,8 +2686,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
return 0;
}
-typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
- void *data);
+typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -2696,11 +2700,42 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
#endif
-extern bool _debug_pagealloc_enabled;
+#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
+DECLARE_STATIC_KEY_TRUE(init_on_alloc);
+#else
+DECLARE_STATIC_KEY_FALSE(init_on_alloc);
+#endif
+static inline bool want_init_on_alloc(gfp_t flags)
+{
+ if (static_branch_unlikely(&init_on_alloc) &&
+ !page_poisoning_enabled())
+ return true;
+ return flags & __GFP_ZERO;
+}
+
+#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
+DECLARE_STATIC_KEY_TRUE(init_on_free);
+#else
+DECLARE_STATIC_KEY_FALSE(init_on_free);
+#endif
+static inline bool want_init_on_free(void)
+{
+ return static_branch_unlikely(&init_on_free) &&
+ !page_poisoning_enabled();
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
+DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#else
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+#endif
static inline bool debug_pagealloc_enabled(void)
{
- return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
+ if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
+ return false;
+
+ return static_branch_unlikely(&_debug_pagealloc_enabled);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
@@ -2850,11 +2885,9 @@ extern long copy_huge_page_from_user(struct page *dst_page,
bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-extern struct page_ext_operations debug_guardpage_ops;
-
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
-extern bool _debug_guardpage_enabled;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static inline unsigned int debug_guardpage_minorder(void)
{
@@ -2863,21 +2896,15 @@ static inline unsigned int debug_guardpage_minorder(void)
static inline bool debug_guardpage_enabled(void)
{
- return _debug_guardpage_enabled;
+ return static_branch_unlikely(&_debug_guardpage_enabled);
}
static inline bool page_is_guard(struct page *page)
{
- struct page_ext *page_ext;
-
if (!debug_guardpage_enabled())
return false;
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return false;
-
- return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+ return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
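
The want_init_on_alloc()/want_init_on_free() helpers added above are meant to replace open-coded __GFP_ZERO tests in allocator hot paths (mm/dmapool.c is converted this way later in this patch). A hedged sketch of that pattern, built around a purely hypothetical pool allocator:

#include <linux/mm.h>           /* want_init_on_alloc(), want_init_on_free() */
#include <linux/string.h>

struct my_pool { size_t obj_size; };                    /* illustrative only */
void *my_pool_take(struct my_pool *pool);               /* hypothetical helper */
void my_pool_put(struct my_pool *pool, void *obj);      /* hypothetical helper */

static void *my_alloc(struct my_pool *pool, gfp_t flags)
{
        void *obj = my_pool_take(pool);

        if (!obj)
                return NULL;
        /* Folds __GFP_ZERO and the init_on_alloc static key into one test. */
        if (want_init_on_alloc(flags))
                memset(obj, 0, pool->obj_size);
        return obj;
}

static void my_free(struct my_pool *pool, void *obj)
{
        /* Wipe the object on free when init_on_free is enabled. */
        if (want_init_on_free())
                memset(obj, 0, pool->obj_size);
        my_pool_put(pool, obj);
}
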
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8ec38b11b361..1d1093474c1a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -329,7 +329,9 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
+#endif
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
diff --git a/include/linux/oom.h b/include/linux/oom.h
index d07992009265..c696c265f019 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -108,7 +108,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
bool __oom_reap_task_mm(struct mm_struct *mm);
extern unsigned long oom_badness(struct task_struct *p,
- struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9f8712a4b1a5..b848517da64c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -703,6 +703,7 @@ PAGEFLAG_FALSE(DoubleMap)
#define PG_offline 0x00000100
#define PG_kmemcg 0x00000200
#define PG_table 0x00000400
+#define PG_guard 0x00000800
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -754,6 +755,11 @@ PAGE_TYPE_OPS(Kmemcg, kmemcg)
*/
PAGE_TYPE_OPS(Table, table)
+/*
+ * Marks guardpages used with debug_pagealloc.
+ */
+PAGE_TYPE_OPS(Guard, guard)
+
extern bool is_free_buddy_page(struct page *page);
__PAGEFLAG(Isolated, isolated, PF_ANY);
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 280ae96dc4c3..1099c2fee20f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -50,7 +50,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
-int
+void
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype);
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index f84f167ec04c..09592951725c 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -17,7 +17,6 @@ struct page_ext_operations {
#ifdef CONFIG_PAGE_EXTENSION
enum page_ext_flags {
- PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index fe0b29bf2df7..c7552459a15f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -383,8 +383,7 @@ extern int read_cache_pages(struct address_space *mapping,
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
- return read_cache_page(mapping, index, filler, data);
+ return read_cache_page(mapping, index, NULL, data);
}
/*
@@ -452,6 +451,9 @@ extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
extern void unlock_page(struct page *page);
+/*
+ * Return true if the page was successfully locked
+ */
static inline int trylock_page(struct page *page)
{
page = compound_head(page);
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 3c202a11a79e..01e8037023f7 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -66,13 +66,6 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
return PFN_PHYS(pfn_t_to_pfn(pfn));
}
-static inline void *pfn_t_to_virt(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn) && !is_device_private_page(pfn_t_to_page(pfn)))
- return __va(pfn_t_to_phys(pfn));
- return NULL;
-}
-
static inline pfn_t page_to_pfn_t(struct page *page)
{
return pfn_to_pfn_t(page_to_pfn(page));
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9449b19c5f10..56c9c7eed34e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -16,6 +16,7 @@
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/percpu-refcount.h>
/*
@@ -115,6 +116,10 @@
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
+
+/* Slab deactivation flag */
+#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
+
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
@@ -151,8 +156,7 @@ void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *);
-void memcg_destroy_kmem_caches(struct mem_cgroup *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -184,6 +188,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
+size_t __ksize(const void *);
size_t ksize(const void *);
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
@@ -641,11 +646,12 @@ struct memcg_cache_params {
struct mem_cgroup *memcg;
struct list_head children_node;
struct list_head kmem_caches_node;
+ struct percpu_ref refcnt;
- void (*deact_fn)(struct kmem_cache *);
+ void (*work_fn)(struct kmem_cache *);
union {
- struct rcu_head deact_rcu_head;
- struct work_struct deact_work;
+ struct rcu_head rcu_head;
+ struct work_struct work;
};
};
};
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4bfb5c4ac108..de2c67a33b7e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -148,7 +148,7 @@ struct zone;
* We always assume that blocks are of size PAGE_SIZE.
*/
struct swap_extent {
- struct list_head list;
+ struct rb_node rb_node;
pgoff_t start_page;
pgoff_t nr_pages;
sector_t start_block;
@@ -175,8 +175,9 @@ enum {
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
+ SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
- SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
@@ -247,8 +248,7 @@ struct swap_info_struct {
unsigned int cluster_next; /* likely index for next allocation */
unsigned int cluster_nr; /* countdown to next cluster search */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
- struct swap_extent *curr_swap_extent;
- struct swap_extent first_swap_extent;
+ struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
@@ -460,7 +460,7 @@ extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
-extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
+extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
@@ -470,6 +470,12 @@ extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
+extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
+
+static inline void put_swap_device(struct swap_info_struct *si)
+{
+ rcu_read_unlock();
+}
#else /* CONFIG_SWAP */
@@ -576,7 +582,7 @@ static inline int page_swapcount(struct page *page)
return 0;
}
-static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
+static inline int __swap_count(swp_entry_t entry)
{
return 0;
}
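
get_swap_device()/put_swap_device() pin a swap device against a concurrent swapoff; the inline stub above shows that put_swap_device() is simply rcu_read_unlock(). A hedged sketch of the intended calling pattern (the wrapper function is illustrative, not taken from this patch):

#include <linux/swap.h>

/* Illustrative only: read a swap count while blocking swapoff. */
static int my_probe_swap_entry(swp_entry_t entry)
{
        struct swap_info_struct *si;
        int count;

        si = get_swap_device(entry);    /* NULL if the entry or device is invalid */
        if (!si)
                return 0;               /* e.g. raced with swapoff */

        count = __swap_count(entry);    /* the device cannot be swapped off here */

        put_swap_device(si);            /* ends the RCU read-side section */
        return count;
}
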
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 51e131245379..9b21d0047710 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -72,10 +72,12 @@ extern void vm_unmap_aliases(void);
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
+extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
+static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
extern void *vmalloc(unsigned long size);
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 61e6fddfb26f..6d28bc433c1c 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -17,7 +17,7 @@ struct vmpressure {
unsigned long tree_scanned;
unsigned long tree_reclaimed;
/* The lock is used to keep the scanned/reclaimed above in sync. */
- struct spinlock sr_lock;
+ spinlock_t sr_lock;
/* The list of vmpressure_event structs. */
struct list_head events;
diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
index a7e66ab11d1d..c23f91ae5fe8 100644
--- a/include/uapi/linux/nilfs2_ondisk.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -29,7 +29,7 @@
#include <linux/types.h>
#include <linux/magic.h>
-
+#include <asm/byteorder.h>
#define NILFS_INODE_BMAP_SIZE 7
@@ -533,19 +533,19 @@ enum {
static inline void \
nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
{ \
- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
- (1UL << NILFS_CHECKPOINT_##flag)); \
+ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
} \
static inline void \
nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
{ \
- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
+ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \
~(1UL << NILFS_CHECKPOINT_##flag)); \
} \
static inline int \
nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
{ \
- return !!(le32_to_cpu(cp->cp_flags) & \
+ return !!(__le32_to_cpu(cp->cp_flags) & \
(1UL << NILFS_CHECKPOINT_##flag)); \
}
@@ -595,20 +595,20 @@ enum {
static inline void \
nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
{ \
- su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
+ su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \
(1UL << NILFS_SEGMENT_USAGE_##flag));\
} \
static inline void \
nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
{ \
su->su_flags = \
- cpu_to_le32(le32_to_cpu(su->su_flags) & \
+ __cpu_to_le32(__le32_to_cpu(su->su_flags) & \
~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
} \
static inline int \
nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
{ \
- return !!(le32_to_cpu(su->su_flags) & \
+ return !!(__le32_to_cpu(su->su_flags) & \
(1UL << NILFS_SEGMENT_USAGE_##flag)); \
}
@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
static inline void
nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
{
- su->su_lastmod = cpu_to_le64(0);
- su->su_nblocks = cpu_to_le32(0);
- su->su_flags = cpu_to_le32(0);
+ su->su_lastmod = __cpu_to_le64(0);
+ su->su_nblocks = __cpu_to_le32(0);
+ su->su_flags = __cpu_to_le32(0);
}
static inline int
nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
{
- return !le32_to_cpu(su->su_flags);
+ return !__le32_to_cpu(su->su_flags);
}
/**
diff --git a/init/main.c b/init/main.c
index 66a196c5e4c3..ff5803b0841c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -520,6 +520,29 @@ static inline void initcall_debug_enable(void)
}
#endif
+/* Report memory auto-initialization states for this boot. */
+static void __init report_meminit(void)
+{
+ const char *stack;
+
+ if (IS_ENABLED(CONFIG_INIT_STACK_ALL))
+ stack = "all";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
+ stack = "byref_all";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
+ stack = "byref";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
+ stack = "__user";
+ else
+ stack = "off";
+
+ pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
+ stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
+ want_init_on_free() ? "on" : "off");
+ if (want_init_on_free())
+ pr_info("mem auto-init: clearing system memory may take some time...\n");
+}
+
/*
* Set up kernel memory allocators
*/
@@ -530,6 +553,7 @@ static void __init mm_init(void)
* bigger than MAX_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
+ report_meminit();
mem_init();
kmem_cache_init();
pgtable_init();
diff --git a/kernel/iomem.c b/kernel/iomem.c
index 93c264444510..62c92e43aa0d 100644
--- a/kernel/iomem.c
+++ b/kernel/iomem.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL(memremap);
void memunmap(void *addr)
{
- if (is_vmalloc_addr(addr))
+ if (is_ioremap_addr(addr))
iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index e3c593c38eff..b63b367a94e8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -7,16 +7,17 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/kasan.h>
#include <linux/kernel.h>
-#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kasan.h>
/*
* Note: test functions are marked noinline so that their names appear in
@@ -619,6 +620,95 @@ static noinline void __init kasan_strings(void)
strnlen(ptr, 1);
}
+static noinline void __init kasan_bitops(void)
+{
+ /*
+ * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
+ * this way we do not actually corrupt other memory.
+ */
+ long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ /*
+ * The calls below try to access a bit within the allocated memory;
+ * however, the accesses are still out-of-bounds, since bitops are defined to
+ * operate on the whole long the bit is in.
+ */
+ pr_info("out-of-bounds in set_bit\n");
+ set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __set_bit\n");
+ __set_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit\n");
+ clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit\n");
+ __clear_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in clear_bit_unlock\n");
+ clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __clear_bit_unlock\n");
+ __clear_bit_unlock(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in change_bit\n");
+ change_bit(BITS_PER_LONG, bits);
+
+ pr_info("out-of-bounds in __change_bit\n");
+ __change_bit(BITS_PER_LONG, bits);
+
+ /*
+ * The calls below try to access a bit beyond the allocated memory.
+ */
+ pr_info("out-of-bounds in test_and_set_bit\n");
+ test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_set_bit\n");
+ __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_set_bit_lock\n");
+ test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_clear_bit\n");
+ test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_clear_bit\n");
+ __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_and_change_bit\n");
+ test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in __test_and_change_bit\n");
+ __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+ pr_info("out-of-bounds in test_bit\n");
+ (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
+
+#if defined(clear_bit_unlock_is_negative_byte)
+ pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
+ clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
+#endif
+ kfree(bits);
+}
+
+static noinline void __init kmalloc_double_kzfree(void)
+{
+ char *ptr;
+ size_t size = 16;
+
+ pr_info("double-free (kzfree)\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ kzfree(ptr);
+ kzfree(ptr);
+}
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -660,6 +750,8 @@ static int __init kmalloc_tests_init(void)
kasan_memchr();
kasan_memcmp();
kasan_strings();
+ kasan_bitops();
+ kmalloc_double_kzfree();
kasan_restore_multi_shot(multishot);
diff --git a/mm/Kconfig b/mm/Kconfig
index ef6efedc5921..0b4352557dd5 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -132,7 +132,8 @@ config HAVE_MEMBLOCK_NODE_MAP
config HAVE_MEMBLOCK_PHYS_MAP
bool
-config HAVE_GENERIC_GUP
+config HAVE_FAST_GUP
+ depends on MMU
bool
config ARCH_KEEP_MEMBLOCK
@@ -762,7 +763,20 @@ config GUP_BENCHMARK
See tools/testing/selftests/vm/gup_benchmark.c
+config GUP_GET_PTE_LOW_HIGH
+ bool
+
config ARCH_HAS_PTE_SPECIAL
bool
+#
+# Some architectures require a special hugepage directory format to
+# support multiple hugepage sizes. For example, commit a4fe3ce76
+# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+# introduced it on powerpc. This allows for more flexible hugepage
+# pagetable layouts.
+#
+config ARCH_HAS_HUGEPD
+ bool
+
endmenu
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index fa6d79281368..82b6a20898bd 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -12,19 +12,23 @@ config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
- select PAGE_EXTENSION
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
Depending on runtime enablement, this results in a small or large
slowdown, but helps to find certain types of memory corruption.
+ Also, the state of page tracking structures is checked more often as
+ pages are being allocated and freed, as unexpected state changes
+ often happen for the same reasons as memory corruption (e.g. double free,
+ use-after-free).
+
For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
fill the pages with poison patterns after free_pages() and verify
- the patterns before alloc_pages(). Additionally,
- this option cannot be enabled in combination with hibernation as
- that would result in incorrect warnings of memory corruption after
- a resume because free pages are not saved to the suspend image.
+ the patterns before alloc_pages(). Additionally, this option cannot
+ be enabled in combination with hibernation as that would result in
+ incorrect warnings of memory corruption after a resume because free
+ pages are not saved to the suspend image.
By default this option will have a small overhead, e.g. by not
allowing the kernel mapping to be backed by large pages on some
diff --git a/mm/Makefile b/mm/Makefile
index ac5e5ba78874..dc0746ca1109 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -22,7 +22,7 @@ KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
mmu-y := nommu.o
-mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
+mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
msync.o page_vma_mapped.o pagewalk.o \
pgtable-generic.o rmap.o vmalloc.o
@@ -39,7 +39,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y)
+ debug.o gup.o $(mmu-y)
# Give 'page_alloc' its own module-parameter namespace
page-alloc-y := page_alloc.o
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 8c94c89a6f7e..fe5d33060415 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -378,7 +378,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
#endif
spin_unlock_irqrestore(&pool->lock, flags);
- if (mem_flags & __GFP_ZERO)
+ if (want_init_on_alloc(mem_flags))
memset(retval, 0, pool->size);
return retval;
@@ -428,6 +428,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
}
offset = vaddr - page->vaddr;
+ if (want_init_on_free())
+ memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
if ((dma - page->dma) != offset) {
spin_unlock_irqrestore(&pool->lock, flags);
diff --git a/mm/failslab.c b/mm/failslab.c
index ec5aad211c5b..f92fed91ac23 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -23,7 +23,8 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
if (gfpflags & __GFP_NOFAIL)
return false;
- if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
+ if (failslab.ignore_gfp_reclaim &&
+ (gfpflags & __GFP_DIRECT_RECLAIM))
return false;
if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
diff --git a/mm/filemap.c b/mm/filemap.c
index f1aa20ab8434..d0cf700bf201 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2504,10 +2504,8 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
*
* vma->vm_mm->mmap_sem must be held on entry.
*
- * If our return value has VM_FAULT_RETRY set, it's because
- * lock_page_or_retry() returned 0.
- * The mmap_sem has usually been released in this case.
- * See __lock_page_or_retry() for the exception.
+ * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
+ * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
*
* If our return value does not have VM_FAULT_RETRY set, the mmap_sem
* has not been released.
@@ -2825,7 +2823,11 @@ repeat:
}
filler:
- err = filler(data, page);
+ if (filler)
+ err = filler(data, page);
+ else
+ err = mapping->a_ops->readpage(data, page);
+
if (err < 0) {
put_page(page);
return ERR_PTR(err);
@@ -2915,7 +2917,8 @@ struct page *read_cache_page(struct address_space *mapping,
int (*filler)(void *, struct page *),
void *data)
{
- return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
+ return do_read_cache_page(mapping, index, filler, data,
+ mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);
@@ -2936,9 +2939,7 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
pgoff_t index,
gfp_t gfp)
{
- filler_t *filler = (filler_t *)mapping->a_ops->readpage;
-
- return do_read_cache_page(mapping, index, filler, NULL, gfp);
+ return do_read_cache_page(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
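
After this change a NULL filler tells do_read_cache_page() to use mapping->a_ops->readpage() directly, so callers no longer need to cast ->readpage to filler_t. A hedged sketch of a caller under the new convention (the wrapper function and names are illustrative):

#include <linux/pagemap.h>

/* Illustrative only: read page @index of @inode through the page cache. */
static struct page *my_read_page(struct inode *inode, pgoff_t index)
{
        /* NULL filler, NULL data: fall back to mapping->a_ops->readpage(). */
        return read_cache_page(inode->i_mapping, index, NULL, NULL);
}
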
diff --git a/mm/gup.c b/mm/gup.c
index ddde097cf9e4..43b7d875de37 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -134,6 +134,7 @@ void put_user_pages(struct page **pages, unsigned long npages)
}
EXPORT_SYMBOL(put_user_pages);
+#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
unsigned int flags)
{
@@ -515,7 +516,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
* an error pointer if there is a mapping to something not represented
* by a page descriptor (see also vm_normal_page()).
*/
-struct page *follow_page_mask(struct vm_area_struct *vma,
+static struct page *follow_page_mask(struct vm_area_struct *vma,
unsigned long address, unsigned int flags,
struct follow_page_context *ctx)
{
@@ -585,11 +586,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
- BUG_ON(pgd_none(*pgd));
+ if (pgd_none(*pgd))
+ return -EFAULT;
p4d = p4d_offset(pgd, address);
- BUG_ON(p4d_none(*p4d));
+ if (p4d_none(*p4d))
+ return -EFAULT;
pud = pud_offset(p4d, address);
- BUG_ON(pud_none(*pud));
+ if (pud_none(*pud))
+ return -EFAULT;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return -EFAULT;
@@ -1101,86 +1105,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
}
/*
- * We can leverage the VM_FAULT_RETRY functionality in the page fault
- * paths better by using either get_user_pages_locked() or
- * get_user_pages_unlocked().
- *
- * get_user_pages_locked() is suitable to replace the form:
- *
- * down_read(&mm->mmap_sem);
- * do_something()
- * get_user_pages(tsk, mm, ..., pages, NULL);
- * up_read(&mm->mmap_sem);
- *
- * to:
- *
- * int locked = 1;
- * down_read(&mm->mmap_sem);
- * do_something()
- * get_user_pages_locked(tsk, mm, ..., pages, &locked);
- * if (locked)
- * up_read(&mm->mmap_sem);
- */
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- /*
- * FIXME: Current FOLL_LONGTERM behavior is incompatible with
- * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
- * vmas. As there are no users of this flag in this call we simply
- * disallow this option for now.
- */
- if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
- return -EINVAL;
-
- return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, NULL, locked,
- gup_flags | FOLL_TOUCH);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-/*
- * get_user_pages_unlocked() is suitable to replace the form:
- *
- * down_read(&mm->mmap_sem);
- * get_user_pages(tsk, mm, ..., pages, NULL);
- * up_read(&mm->mmap_sem);
- *
- * with:
- *
- * get_user_pages_unlocked(tsk, mm, ..., pages);
- *
- * It is functionally equivalent to get_user_pages_fast so
- * get_user_pages_fast should be used instead if specific gup_flags
- * (e.g. FOLL_FORCE) are not required.
- */
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
-{
- struct mm_struct *mm = current->mm;
- int locked = 1;
- long ret;
-
- /*
- * FIXME: Current FOLL_LONGTERM behavior is incompatible with
- * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
- * vmas. As there are no users of this flag in this call we simply
- * disallow this option for now.
- */
- if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
- return -EINVAL;
-
- down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
- &locked, gup_flags | FOLL_TOUCH);
- if (locked)
- up_read(&mm->mmap_sem);
- return ret;
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
-/*
* get_user_pages_remote() - pin user pages in memory
* @tsk: the task_struct to use for page fault accounting, or
* NULL if faults are not to be recorded.
@@ -1256,6 +1180,198 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages_remote);
+/**
+ * populate_vma_page_range() - populate a range of pages in the vma.
+ * @vma: target vma
+ * @start: start address
+ * @end: end address
+ * @nonblocking:
+ *
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
+ *
+ * return 0 on success, negative error code on error.
+ *
+ * vma->vm_mm->mmap_sem must be held.
+ *
+ * If @nonblocking is NULL, it may be held for read or write and will
+ * be unperturbed.
+ *
+ * If @nonblocking is non-NULL, it must be held for read only and may be
+ * released. If it's released, *@nonblocking will be set to 0.
+ */
+long populate_vma_page_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, int *nonblocking)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long nr_pages = (end - start) / PAGE_SIZE;
+ int gup_flags;
+
+ VM_BUG_ON(start & ~PAGE_MASK);
+ VM_BUG_ON(end & ~PAGE_MASK);
+ VM_BUG_ON_VMA(start < vma->vm_start, vma);
+ VM_BUG_ON_VMA(end > vma->vm_end, vma);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+
+ gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
+ if (vma->vm_flags & VM_LOCKONFAULT)
+ gup_flags &= ~FOLL_POPULATE;
+ /*
+ * We want to touch writable mappings with a write fault in order
+ * to break COW, except for shared mappings because these don't COW
+ * and we would not want to dirty them for nothing.
+ */
+ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+ gup_flags |= FOLL_WRITE;
+
+ /*
+ * We want mlock to succeed for regions that have any permissions
+ * other than PROT_NONE.
+ */
+ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+ gup_flags |= FOLL_FORCE;
+
+ /*
+ * We made sure addr is within a VMA, so the following will
+ * not result in a stack expansion that recurses back here.
+ */
+ return __get_user_pages(current, mm, start, nr_pages, gup_flags,
+ NULL, NULL, nonblocking);
+}
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long end, nstart, nend;
+ struct vm_area_struct *vma = NULL;
+ int locked = 0;
+ long ret = 0;
+
+ end = start + len;
+
+ for (nstart = start; nstart < end; nstart = nend) {
+ /*
+ * We want to fault in pages for [nstart; end) address range.
+ * Find first corresponding VMA.
+ */
+ if (!locked) {
+ locked = 1;
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, nstart);
+ } else if (nstart >= vma->vm_end)
+ vma = vma->vm_next;
+ if (!vma || vma->vm_start >= end)
+ break;
+ /*
+ * Set [nstart; nend) to intersection of desired address
+ * range with the first VMA. Also, skip undesirable VMA types.
+ */
+ nend = min(end, vma->vm_end);
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ continue;
+ if (nstart < vma->vm_start)
+ nstart = vma->vm_start;
+ /*
+ * Now fault in a range of pages. populate_vma_page_range()
+ * double checks the vma flags, so that it won't mlock pages
+ * if the vma was already munlocked.
+ */
+ ret = populate_vma_page_range(vma, nstart, nend, &locked);
+ if (ret < 0) {
+ if (ignore_errors) {
+ ret = 0;
+ continue; /* continue at next VMA */
+ }
+ break;
+ }
+ nend = nstart + ret * PAGE_SIZE;
+ ret = 0;
+ }
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret; /* 0 or negative error code */
+}
+
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+ struct vm_area_struct *vma;
+ struct page *page;
+
+ if (__get_user_pages(current, current->mm, addr, 1,
+ FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
+ NULL) < 1)
+ return NULL;
+ flush_cache_page(vma, addr, page_to_pfn(page));
+ return page;
+}
+#endif /* CONFIG_ELF_CORE */
+#else /* CONFIG_MMU */
+static long __get_user_pages_locked(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long nr_pages, struct page **pages,
+ struct vm_area_struct **vmas, int *locked,
+ unsigned int foll_flags)
+{
+ struct vm_area_struct *vma;
+ unsigned long vm_flags;
+ int i;
+
+ /* calculate required read or write permissions.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
+ */
+ vm_flags = (foll_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (foll_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+ for (i = 0; i < nr_pages; i++) {
+ vma = find_vma(mm, start);
+ if (!vma)
+ goto finish_or_fault;
+
+ /* protect what we can, including chardevs */
+ if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ !(vm_flags & vma->vm_flags))
+ goto finish_or_fault;
+
+ if (pages) {
+ pages[i] = virt_to_page(start);
+ if (pages[i])
+ get_page(pages[i]);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ start = (start + PAGE_SIZE) & PAGE_MASK;
+ }
+
+ return i;
+
+finish_or_fault:
+ return i ? : -EFAULT;
+}
+#endif /* !CONFIG_MMU */
+
#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
@@ -1336,25 +1452,31 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
struct vm_area_struct **vmas,
unsigned int gup_flags)
{
- long i;
+ unsigned long i;
+ unsigned long step;
bool drain_allow = true;
bool migrate_allow = true;
LIST_HEAD(cma_page_list);
check_again:
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < nr_pages;) {
+
+ struct page *head = compound_head(pages[i]);
+
+ /*
+ * gup may start from a tail page. Advance the step by the number
+ * of pages remaining in this compound page.
+ */
+ step = (1 << compound_order(head)) - (pages[i] - head);
/*
* If we get a page from the CMA zone, since we are going to
* be pinning these entries, we might as well move them out
* of the CMA zone if possible.
*/
- if (is_migrate_cma_page(pages[i])) {
-
- struct page *head = compound_head(pages[i]);
-
- if (PageHuge(head)) {
+ if (is_migrate_cma_page(head)) {
+ if (PageHuge(head))
isolate_huge_page(head, &cma_page_list);
- } else {
+ else {
if (!PageLRU(head) && drain_allow) {
lru_add_drain_all();
drain_allow = false;
@@ -1369,6 +1491,8 @@ check_again:
}
}
}
+
+ i += step;
}
if (!list_empty(&cma_page_list)) {
@@ -1417,7 +1541,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
{
return nr_pages;
}
-#endif
+#endif /* CONFIG_CMA */
/*
* __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
@@ -1503,155 +1627,88 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages);
-/**
- * populate_vma_page_range() - populate a range of pages in the vma.
- * @vma: target vma
- * @start: start address
- * @end: end address
- * @nonblocking:
- *
- * This takes care of mlocking the pages too if VM_LOCKED is set.
+/*
+ * We can leverage the VM_FAULT_RETRY functionality in the page fault
+ * paths better by using either get_user_pages_locked() or
+ * get_user_pages_unlocked().
*
- * return 0 on success, negative error code on error.
+ * get_user_pages_locked() is suitable to replace the form:
*
- * vma->vm_mm->mmap_sem must be held.
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
*
- * If @nonblocking is NULL, it may be held for read or write and will
- * be unperturbed.
+ * to:
*
- * If @nonblocking is non-NULL, it must held for read only and may be
- * released. If it's released, *@nonblocking will be set to 0.
+ * int locked = 1;
+ * down_read(&mm->mmap_sem);
+ * do_something()
+ * get_user_pages_locked(tsk, mm, ..., pages, &locked);
+ * if (locked)
+ * up_read(&mm->mmap_sem);
*/
-long populate_vma_page_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end, int *nonblocking)
+long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ int *locked)
{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long nr_pages = (end - start) / PAGE_SIZE;
- int gup_flags;
-
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(end & ~PAGE_MASK);
- VM_BUG_ON_VMA(start < vma->vm_start, vma);
- VM_BUG_ON_VMA(end > vma->vm_end, vma);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
- gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
- if (vma->vm_flags & VM_LOCKONFAULT)
- gup_flags &= ~FOLL_POPULATE;
- /*
- * We want to touch writable mappings with a write fault in order
- * to break COW, except for shared mappings because these don't COW
- * and we would not want to dirty them for nothing.
- */
- if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
- gup_flags |= FOLL_WRITE;
-
/*
- * We want mlock to succeed for regions that have any permissions
- * other than PROT_NONE.
+ * FIXME: Current FOLL_LONGTERM behavior is incompatible with
+ * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+ * vmas. As there are no users of this flag in this call we simply
+ * disallow this option for now.
*/
- if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
- gup_flags |= FOLL_FORCE;
+ if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+ return -EINVAL;
- /*
- * We made sure addr is within a VMA, so the following will
- * not result in a stack expansion that recurses back here.
- */
- return __get_user_pages(current, mm, start, nr_pages, gup_flags,
- NULL, NULL, nonblocking);
+ return __get_user_pages_locked(current, current->mm, start, nr_pages,
+ pages, NULL, locked,
+ gup_flags | FOLL_TOUCH);
}
+EXPORT_SYMBOL(get_user_pages_locked);
/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
+ * get_user_pages_unlocked() is suitable to replace the form:
*
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
+ * down_read(&mm->mmap_sem);
+ * get_user_pages(tsk, mm, ..., pages, NULL);
+ * up_read(&mm->mmap_sem);
+ *
+ * with:
+ *
+ * get_user_pages_unlocked(tsk, mm, ..., pages);
+ *
+ * It is functionally equivalent to get_user_pages_fast so
+ * get_user_pages_fast should be used instead if specific gup_flags
+ * (e.g. FOLL_FORCE) are not required.
*/
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+ struct page **pages, unsigned int gup_flags)
{
struct mm_struct *mm = current->mm;
- unsigned long end, nstart, nend;
- struct vm_area_struct *vma = NULL;
- int locked = 0;
- long ret = 0;
+ int locked = 1;
+ long ret;
- end = start + len;
+ /*
+ * FIXME: Current FOLL_LONGTERM behavior is incompatible with
+ * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+ * vmas. As there are no users of this flag in this call we simply
+ * disallow this option for now.
+ */
+ if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+ return -EINVAL;
- for (nstart = start; nstart < end; nstart = nend) {
- /*
- * We want to fault in pages for [nstart; end) address range.
- * Find first corresponding VMA.
- */
- if (!locked) {
- locked = 1;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, nstart);
- } else if (nstart >= vma->vm_end)
- vma = vma->vm_next;
- if (!vma || vma->vm_start >= end)
- break;
- /*
- * Set [nstart; nend) to intersection of desired address
- * range with the first VMA. Also, skip undesirable VMA types.
- */
- nend = min(end, vma->vm_end);
- if (vma->vm_flags & (VM_IO | VM_PFNMAP))
- continue;
- if (nstart < vma->vm_start)
- nstart = vma->vm_start;
- /*
- * Now fault in a range of pages. populate_vma_page_range()
- * double checks the vma flags, so that it won't mlock pages
- * if the vma was already munlocked.
- */
- ret = populate_vma_page_range(vma, nstart, nend, &locked);
- if (ret < 0) {
- if (ignore_errors) {
- ret = 0;
- continue; /* continue at next VMA */
- }
- break;
- }
- nend = nstart + ret * PAGE_SIZE;
- ret = 0;
- }
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+ &locked, gup_flags | FOLL_TOUCH);
if (locked)
up_read(&mm->mmap_sem);
- return ret; /* 0 or negative error code */
-}
-
-/**
- * get_dump_page() - pin user page in memory while writing it to core dump
- * @addr: user address
- *
- * Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by put_page().
- *
- * Returns NULL on any kind of failure - a hole must then be inserted into
- * the corefile, to preserve alignment with its headers; and also returns
- * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
- * allowing a hole to be left in the corefile to save diskspace.
- *
- * Called without mmap_sem, but after all other threads have been killed.
- */
-#ifdef CONFIG_ELF_CORE
-struct page *get_dump_page(unsigned long addr)
-{
- struct vm_area_struct *vma;
- struct page *page;
-
- if (__get_user_pages(current, current->mm, addr, 1,
- FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
- NULL) < 1)
- return NULL;
- flush_cache_page(vma, addr, page_to_pfn(page));
- return page;
+ return ret;
}
-#endif /* CONFIG_ELF_CORE */
+EXPORT_SYMBOL(get_user_pages_unlocked);
/*
- * Generic Fast GUP
+ * Fast GUP
*
* get_user_pages_fast attempts to pin user pages by walking the page
* tables directly and avoids taking locks. Thus the walker needs to be
@@ -1683,20 +1740,64 @@ struct page *get_dump_page(unsigned long addr)
*
* This code is based heavily on the PowerPC implementation by Nick Piggin.
*/
-#ifdef CONFIG_HAVE_GENERIC_GUP
+#ifdef CONFIG_HAVE_FAST_GUP
+#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
+/*
+ * WARNING: only to be used in the get_user_pages_fast() implementation.
+ *
+ * With get_user_pages_fast(), we walk down the pagetables without taking any
+ * locks. For this we would like to load the pointers atomically, but sometimes
+ * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
+ * we do have is the guarantee that a PTE will only either go from not present
+ * to present, or present to not present, or both -- it will not switch to a
+ * completely different present page without a TLB flush in between; something
+ * that we are blocking by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
+ * We load pte_high *after* loading pte_low, which ensures we don't see an older
+ * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
+ * picked up a changed pte high. We might have gotten rubbish values from
+ * pte_low and pte_high, but we are guaranteed that pte_low will not have the
+ * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
+ * operates on present ptes we're safe.
+ */
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+ pte_t pte;
-#ifndef gup_get_pte
+ do {
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ } while (unlikely(pte.pte_low != ptep->pte_low));
+
+ return pte;
+}
+#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
/*
- * We assume that the PTE can be read atomically. If this is not the case for
- * your architecture, please provide the helper.
+ * We require that the PTE can be read atomically.
*/
static inline pte_t gup_get_pte(pte_t *ptep)
{
return READ_ONCE(*ptep);
}
-#endif
+#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
+static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+ struct page **pages)
{
while ((*nr) - nr_start) {
struct page *page = pages[--(*nr)];
@@ -1877,6 +1978,90 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
}
#endif
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
+ unsigned long sz)
+{
+ unsigned long __boundary = (addr + sz) & ~(sz-1);
+ return (__boundary - 1 < end - 1) ? __boundary : end;
+}
+
+static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long pte_end;
+ struct page *head, *page;
+ pte_t pte;
+ int refs;
+
+ pte_end = (addr + sz) & ~(sz-1);
+ if (pte_end < end)
+ end = pte_end;
+
+ pte = READ_ONCE(*ptep);
+
+ if (!pte_access_permitted(pte, write))
+ return 0;
+
+ /* hugepages are never "special" */
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ refs = 0;
+ head = pte_page(pte);
+
+ page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ head = try_get_compound_head(head, refs);
+ if (!head) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ /* Could be optimized better */
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ SetPageReferenced(head);
+ return 1;
+}
+
+static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned int pdshift, unsigned long end, int write,
+ struct page **pages, int *nr)
+{
+ pte_t *ptep;
+ unsigned long sz = 1UL << hugepd_shift(hugepd);
+ unsigned long next;
+
+ ptep = hugepte_offset(hugepd, addr, pdshift);
+ do {
+ next = hugepte_addr_end(addr, end, sz);
+ if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+ return 0;
+ } while (ptep++, addr = next, addr != end);
+
+ return 1;
+}
+#else
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end, int write,
+ struct page **pages, int *nr)
+{
+ return 0;
+}
+#endif /* CONFIG_ARCH_HAS_HUGEPD */
+
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
unsigned long end, unsigned int flags, struct page **pages, int *nr)
{
@@ -2117,19 +2302,21 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
return;
} while (pgdp++, addr = next, addr != end);
}
+#else
+static inline void gup_pgd_range(unsigned long addr, unsigned long end,
+ unsigned int flags, struct page **pages, int *nr)
+{
+}
+#endif /* CONFIG_HAVE_FAST_GUP */
#ifndef gup_fast_permitted
/*
* Check if it's allowed to use __get_user_pages_fast() for the range, or
* we need to fall back to the slow version:
*/
-bool gup_fast_permitted(unsigned long start, int nr_pages)
+static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
- unsigned long len, end;
-
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- return end >= start;
+ return true;
}
#endif
@@ -2138,6 +2325,9 @@ bool gup_fast_permitted(unsigned long start, int nr_pages)
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, 0 if no pages were pinned.
+ *
+ * If the architecture does not support this function, it simply returns
+ * with no pages pinned.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
@@ -2146,10 +2336,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
unsigned long flags;
int nr = 0;
- start &= PAGE_MASK;
+ start = untagged_addr(start) & PAGE_MASK;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
+ if (end <= start)
+ return 0;
if (unlikely(!access_ok((void __user *)start, len)))
return 0;
@@ -2165,7 +2357,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* block IPIs that come from THPs splitting.
*/
- if (gup_fast_permitted(start, nr_pages)) {
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+ gup_fast_permitted(start, end)) {
local_irq_save(flags);
gup_pgd_range(start, end, write ? FOLL_WRITE : 0, pages, &nr);
local_irq_restore(flags);
@@ -2173,6 +2366,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
return nr;
}
+EXPORT_SYMBOL_GPL(__get_user_pages_fast);
static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
@@ -2219,18 +2413,21 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned long addr, len, end;
int nr = 0, ret = 0;
- start &= PAGE_MASK;
+ if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM)))
+ return -EINVAL;
+
+ start = untagged_addr(start) & PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (nr_pages <= 0)
+ if (end <= start)
return 0;
-
if (unlikely(!access_ok((void __user *)start, len)))
return -EFAULT;
- if (gup_fast_permitted(start, nr_pages)) {
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) &&
+ gup_fast_permitted(start, end)) {
local_irq_disable();
gup_pgd_range(addr, end, gup_flags, pages, &nr);
local_irq_enable();
@@ -2256,5 +2453,4 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
return ret;
}
-
-#endif /* CONFIG_HAVE_GENERIC_GUP */
+EXPORT_SYMBOL_GPL(get_user_pages_fast);
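
With this rework get_user_pages_fast() is exported for modules and rejects gup_flags other than FOLL_WRITE and FOLL_LONGTERM; on !CONFIG_HAVE_FAST_GUP configurations it falls back to the slow path. A hedged sketch of a caller (the wrapper and its buffer handling are illustrative, not from this patch):

#include <linux/mm.h>

/* Illustrative only: pin a user buffer, e.g. for long-lived DMA. */
static long my_pin_user_buffer(unsigned long uaddr, int nr_pages,
                               struct page **pages)
{
        long pinned;

        /* Only FOLL_WRITE and FOLL_LONGTERM are accepted as gup_flags now. */
        pinned = get_user_pages_fast(uaddr, nr_pages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned < 0)
                return pinned;          /* e.g. -EFAULT or -EINVAL */

        /*
         * May pin fewer pages than requested; the caller releases them
         * later, e.g. with put_user_pages(pages, pinned).
         */
        return pinned;
}
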
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 242fdc01aaa9..2277b82902d8 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -14,8 +14,6 @@
*
*/
-#define __KASAN_INTERNAL
-
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -89,17 +87,17 @@ void kasan_disable_current(void)
current->kasan_depth--;
}
-void kasan_check_read(const volatile void *p, unsigned int size)
+bool __kasan_check_read(const volatile void *p, unsigned int size)
{
- check_memory_region((unsigned long)p, size, false, _RET_IP_);
+ return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
-EXPORT_SYMBOL(kasan_check_read);
+EXPORT_SYMBOL(__kasan_check_read);
-void kasan_check_write(const volatile void *p, unsigned int size)
+bool __kasan_check_write(const volatile void *p, unsigned int size)
{
- check_memory_region((unsigned long)p, size, true, _RET_IP_);
+ return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
-EXPORT_SYMBOL(kasan_check_write);
+EXPORT_SYMBOL(__kasan_check_write);
#undef memset
void *memset(void *addr, int c, size_t len)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 504c79363a34..616f9dd82d12 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -166,29 +166,30 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
return memory_is_poisoned_n(addr, size);
}
-static __always_inline void check_memory_region_inline(unsigned long addr,
+static __always_inline bool check_memory_region_inline(unsigned long addr,
size_t size, bool write,
unsigned long ret_ip)
{
if (unlikely(size == 0))
- return;
+ return true;
if (unlikely((void *)addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
kasan_report(addr, size, write, ret_ip);
- return;
+ return false;
}
if (likely(!memory_is_poisoned(addr, size)))
- return;
+ return true;
kasan_report(addr, size, write, ret_ip);
+ return false;
}
-void check_memory_region(unsigned long addr, size_t size, bool write,
+bool check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip)
{
- check_memory_region_inline(addr, size, write, ret_ip);
+ return check_memory_region_inline(addr, size, write, ret_ip);
}
void kasan_cache_shrink(struct kmem_cache *cache)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3ce956efa0cb..014f19e76247 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -43,6 +43,11 @@
#define KASAN_ALLOCA_REDZONE_SIZE 32
+/*
+ * Stack frame marker (compiler ABI).
+ */
+#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3
+
/* Don't break randconfig/all*config builds */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
@@ -123,7 +128,15 @@ static inline bool addr_has_shadow(const void *addr)
void kasan_poison_shadow(const void *address, size_t size, u8 value);
-void check_memory_region(unsigned long addr, size_t size, bool write,
+/**
+ * check_memory_region - Check a memory region, and report if the access is invalid.
+ * @addr: the accessed address
+ * @size: the accessed size
+ * @write: true if access is a write access
+ * @ret_ip: return address
+ * @return: true if access was valid, false if invalid
+ */
+bool check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip);
void *find_first_bad_addr(void *addr, size_t size);
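A minimal sketch (not part of the patch) of how the new boolean result can be consumed, assuming the kasan_check_write() wrapper in <linux/kasan-checks.h> forwards the value returned by __kasan_check_write(); the helper below and the decision to skip the write are illustrative.

#include <linux/kasan-checks.h>
#include <linux/string.h>

static void example_checked_fill(void *dst, int c, size_t len)
{
	/* The check still reports the bug, but now also says whether it passed. */
	if (!kasan_check_write(dst, len))
		return;

	memset(dst, c, len);
}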
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 03a443579386..0e5f965f1882 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>
+#include <linux/sched/task_stack.h>
#include <asm/sections.h>
@@ -181,6 +182,168 @@ static inline bool init_task_stack_addr(const void *addr)
sizeof(init_thread_union.stack));
}
+static bool __must_check tokenize_frame_descr(const char **frame_descr,
+ char *token, size_t max_tok_len,
+ unsigned long *value)
+{
+ const char *sep = strchr(*frame_descr, ' ');
+
+ if (sep == NULL)
+ sep = *frame_descr + strlen(*frame_descr);
+
+ if (token != NULL) {
+ const size_t tok_len = sep - *frame_descr;
+
+ if (tok_len + 1 > max_tok_len) {
+ pr_err("KASAN internal error: frame description too long: %s\n",
+ *frame_descr);
+ return false;
+ }
+
+ /* Copy token (+ 1 byte for '\0'). */
+ strlcpy(token, *frame_descr, tok_len + 1);
+ }
+
+ /* Advance frame_descr past separator. */
+ *frame_descr = sep + 1;
+
+ if (value != NULL && kstrtoul(token, 10, value)) {
+ pr_err("KASAN internal error: not a valid number: %s\n", token);
+ return false;
+ }
+
+ return true;
+}
+
+static void print_decoded_frame_descr(const char *frame_descr)
+{
+ /*
+ * We need to parse the following string:
+ * "n alloc_1 alloc_2 ... alloc_n"
+ * where alloc_i looks like
+ * "offset size len name"
+ * or "offset size len name:line".
+ */
+
+ char token[64];
+ unsigned long num_objects;
+
+ if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
+ &num_objects))
+ return;
+
+ pr_err("\n");
+ pr_err("this frame has %lu %s:\n", num_objects,
+ num_objects == 1 ? "object" : "objects");
+
+ while (num_objects--) {
+ unsigned long offset;
+ unsigned long size;
+
+ /* access offset */
+ if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
+ &offset))
+ return;
+ /* access size */
+ if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
+ &size))
+ return;
+ /* name length (unused) */
+ if (!tokenize_frame_descr(&frame_descr, NULL, 0, NULL))
+ return;
+ /* object name */
+ if (!tokenize_frame_descr(&frame_descr, token, sizeof(token),
+ NULL))
+ return;
+
+ /* Strip line number; without filename it's not very helpful. */
+ strreplace(token, ':', '\0');
+
+ /* Finally, print object information. */
+ pr_err(" [%lu, %lu) '%s'", offset, offset + size, token);
+ }
+}
+
+static bool __must_check get_address_stack_frame_info(const void *addr,
+ unsigned long *offset,
+ const char **frame_descr,
+ const void **frame_pc)
+{
+ unsigned long aligned_addr;
+ unsigned long mem_ptr;
+ const u8 *shadow_bottom;
+ const u8 *shadow_ptr;
+ const unsigned long *frame;
+
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_STACK_GROWSUP));
+
+ /*
+ * NOTE: We currently only support printing frame information for
+ * accesses to the task's own stack.
+ */
+ if (!object_is_on_stack(addr))
+ return false;
+
+ aligned_addr = round_down((unsigned long)addr, sizeof(long));
+ mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
+ shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
+ shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
+
+ while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
+ shadow_ptr--;
+ mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+ }
+
+ while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
+ shadow_ptr--;
+ mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+ }
+
+ if (shadow_ptr < shadow_bottom)
+ return false;
+
+ frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
+ if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
+ pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
+ frame[0]);
+ return false;
+ }
+
+ *offset = (unsigned long)addr - (unsigned long)frame;
+ *frame_descr = (const char *)frame[1];
+ *frame_pc = (void *)frame[2];
+
+ return true;
+}
+
+static void print_address_stack_frame(const void *addr)
+{
+ unsigned long offset;
+ const char *frame_descr;
+ const void *frame_pc;
+
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ return;
+
+ if (!get_address_stack_frame_info(addr, &offset, &frame_descr,
+ &frame_pc))
+ return;
+
+ /*
+ * get_address_stack_frame_info only returns true if the given addr is
+ * on the current task's stack.
+ */
+ pr_err("\n");
+ pr_err("addr %px is located in stack of task %s/%d at offset %lu in frame:\n",
+ addr, current->comm, task_pid_nr(current), offset);
+ pr_err(" %pS\n", frame_pc);
+
+ if (!frame_descr)
+ return;
+
+ print_decoded_frame_descr(frame_descr);
+}
+
static void print_address_description(void *addr)
{
struct page *page = addr_to_page(addr);
@@ -204,6 +367,8 @@ static void print_address_description(void *addr)
pr_err("The buggy address belongs to the page:\n");
dump_page(page, "kasan: bad access detected");
}
+
+ print_address_stack_frame(addr);
}
static bool row_is_guilty(const void *row, const void *guilty)
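For illustration only (made-up offsets, sizes and names): given a compiler-emitted frame description such as "2 16 8 3 buf 48 32 7 arr:153", the parser above would treat it as a two-object frame and print, with the :line suffix stripped:

	this frame has 2 objects:
	 [16, 24) 'buf'
	 [48, 80) 'arr'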
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 63fca3172659..0e987c9ca052 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -76,7 +76,7 @@ void *kasan_reset_tag(const void *addr)
return reset_tag(addr);
}
-void check_memory_region(unsigned long addr, size_t size, bool write,
+bool check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip)
{
u8 tag;
@@ -84,7 +84,7 @@ void check_memory_region(unsigned long addr, size_t size, bool write,
void *untagged_addr;
if (unlikely(size == 0))
- return;
+ return true;
tag = get_tag((const void *)addr);
@@ -106,22 +106,24 @@ void check_memory_region(unsigned long addr, size_t size, bool write,
* set to KASAN_TAG_KERNEL (0xFF)).
*/
if (tag == KASAN_TAG_KERNEL)
- return;
+ return true;
untagged_addr = reset_tag((const void *)addr);
if (unlikely(untagged_addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
kasan_report(addr, size, write, ret_ip);
- return;
+ return false;
}
shadow_first = kasan_mem_to_shadow(untagged_addr);
shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
if (*shadow != tag) {
kasan_report(addr, size, write, ret_ip);
- return;
+ return false;
}
}
+
+ return true;
}
#define DEFINE_HWASAN_LOAD_STORE(size) \
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 9dd581d11565..aa8f4fa93ca3 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -575,7 +575,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
if (in_irq()) {
object->pid = 0;
strncpy(object->comm, "hardirq", sizeof(object->comm));
- } else if (in_softirq()) {
+ } else if (in_serving_softirq()) {
object->pid = 0;
strncpy(object->comm, "softirq", sizeof(object->comm));
} else {
@@ -1866,7 +1866,7 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
}
if (!kmemleak_enabled) {
- ret = -EBUSY;
+ ret = -EPERM;
goto out;
}
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 927d85be32f6..0f1f6b06b7f3 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
+#include "slab.h"
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
@@ -63,7 +64,7 @@ static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
if (!memcg_kmem_enabled())
return NULL;
page = virt_to_head_page(ptr);
- return page->mem_cgroup;
+ return memcg_from_slab_page(page);
}
static inline struct list_lru_one *
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ba9138a4a1de..4f05735b02d3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -57,6 +57,7 @@
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
+#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
@@ -485,7 +486,10 @@ ino_t page_cgroup_ino(struct page *page)
unsigned long ino = 0;
rcu_read_lock();
- memcg = READ_ONCE(page->mem_cgroup);
+ if (PageHead(page) && PageSlab(page))
+ memcg = memcg_from_slab_page(page);
+ else
+ memcg = READ_ONCE(page->mem_cgroup);
while (memcg && !(memcg->css.flags & CSS_ONLINE))
memcg = parent_mem_cgroup(memcg);
if (memcg)
@@ -1163,7 +1167,7 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
struct css_task_iter it;
struct task_struct *task;
- css_task_iter_start(&iter->css, 0, &it);
+ css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
while (!ret && (task = css_task_iter_next(&it)))
ret = fn(task, arg);
css_task_iter_end(&it);
@@ -1255,32 +1259,6 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
*lru_size += nr_pages;
}
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
-{
- struct mem_cgroup *task_memcg;
- struct task_struct *p;
- bool ret;
-
- p = find_lock_task_mm(task);
- if (p) {
- task_memcg = get_mem_cgroup_from_mm(p->mm);
- task_unlock(p);
- } else {
- /*
- * All threads may have already detached their mm's, but the oom
- * killer still needs to detect if they have already been oom
- * killed to prevent needlessly killing additional tasks.
- */
- rcu_read_lock();
- task_memcg = mem_cgroup_from_task(task);
- css_get(&task_memcg->css);
- rcu_read_unlock();
- }
- ret = mem_cgroup_is_descendant(task_memcg, memcg);
- css_put(&task_memcg->css);
- return ret;
-}
-
/**
* mem_cgroup_margin - calculate chargeable space of a memory cgroup
* @memcg: the memory cgroup
@@ -1356,27 +1334,114 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
return false;
}
-static const unsigned int memcg1_stats[] = {
- MEMCG_CACHE,
- MEMCG_RSS,
- MEMCG_RSS_HUGE,
- NR_SHMEM,
- NR_FILE_MAPPED,
- NR_FILE_DIRTY,
- NR_WRITEBACK,
- MEMCG_SWAP,
-};
+static char *memory_stat_format(struct mem_cgroup *memcg)
+{
+ struct seq_buf s;
+ int i;
-static const char *const memcg1_stat_names[] = {
- "cache",
- "rss",
- "rss_huge",
- "shmem",
- "mapped_file",
- "dirty",
- "writeback",
- "swap",
-};
+ seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
+ if (!s.buffer)
+ return NULL;
+
+ /*
+ * Provide statistics on the state of the memory subsystem as
+ * well as cumulative event counters that show past behavior.
+ *
+ * This list is ordered following a combination of these gradients:
+ * 1) generic big picture -> specifics and details
+ * 2) reflecting userspace activity -> reflecting kernel heuristics
+ *
+ * Current memory state:
+ */
+
+ seq_buf_printf(&s, "anon %llu\n",
+ (u64)memcg_page_state(memcg, MEMCG_RSS) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "file %llu\n",
+ (u64)memcg_page_state(memcg, MEMCG_CACHE) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "kernel_stack %llu\n",
+ (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
+ 1024);
+ seq_buf_printf(&s, "slab %llu\n",
+ (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
+ memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "sock %llu\n",
+ (u64)memcg_page_state(memcg, MEMCG_SOCK) *
+ PAGE_SIZE);
+
+ seq_buf_printf(&s, "shmem %llu\n",
+ (u64)memcg_page_state(memcg, NR_SHMEM) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "file_mapped %llu\n",
+ (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "file_dirty %llu\n",
+ (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "file_writeback %llu\n",
+ (u64)memcg_page_state(memcg, NR_WRITEBACK) *
+ PAGE_SIZE);
+
+ /*
+ * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
+ * with the NR_ANON_THP vm counter, but right now it's a pain in the
+ * arse because it requires migrating the work out of rmap to a place
+ * where the page->mem_cgroup is set up and stable.
+ */
+ seq_buf_printf(&s, "anon_thp %llu\n",
+ (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
+ PAGE_SIZE);
+
+ for (i = 0; i < NR_LRU_LISTS; i++)
+ seq_buf_printf(&s, "%s %llu\n", mem_cgroup_lru_names[i],
+ (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
+ PAGE_SIZE);
+
+ seq_buf_printf(&s, "slab_reclaimable %llu\n",
+ (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
+ PAGE_SIZE);
+ seq_buf_printf(&s, "slab_unreclaimable %llu\n",
+ (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
+ PAGE_SIZE);
+
+ /* Accumulated memory events */
+
+ seq_buf_printf(&s, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
+ seq_buf_printf(&s, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));
+
+ seq_buf_printf(&s, "workingset_refault %lu\n",
+ memcg_page_state(memcg, WORKINGSET_REFAULT));
+ seq_buf_printf(&s, "workingset_activate %lu\n",
+ memcg_page_state(memcg, WORKINGSET_ACTIVATE));
+ seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
+ memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
+
+ seq_buf_printf(&s, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
+ seq_buf_printf(&s, "pgscan %lu\n",
+ memcg_events(memcg, PGSCAN_KSWAPD) +
+ memcg_events(memcg, PGSCAN_DIRECT));
+ seq_buf_printf(&s, "pgsteal %lu\n",
+ memcg_events(memcg, PGSTEAL_KSWAPD) +
+ memcg_events(memcg, PGSTEAL_DIRECT));
+ seq_buf_printf(&s, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
+ seq_buf_printf(&s, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
+ seq_buf_printf(&s, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
+ seq_buf_printf(&s, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ seq_buf_printf(&s, "thp_fault_alloc %lu\n",
+ memcg_events(memcg, THP_FAULT_ALLOC));
+ seq_buf_printf(&s, "thp_collapse_alloc %lu\n",
+ memcg_events(memcg, THP_COLLAPSE_ALLOC));
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ /* The above should easily fit into one page */
+ WARN_ON_ONCE(seq_buf_has_overflowed(&s));
+
+ return s.buffer;
+}
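A minimal sketch (not part of the patch) of the seq_buf pattern memory_stat_format() relies on, reduced to a toy formatter; the function name and counters are illustrative.

#include <linux/seq_buf.h>
#include <linux/slab.h>

static char *example_format_two_counters(unsigned long a, unsigned long b)
{
	struct seq_buf s;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	seq_buf_printf(&s, "counter_a %lu\n", a);
	seq_buf_printf(&s, "counter_b %lu\n", b);

	/* Writes past PAGE_SIZE are dropped; the flag records the overflow. */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;	/* caller kfree()s, as the OOM and stat paths do */
}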
#define K(x) ((x) << (PAGE_SHIFT-10))
/**
@@ -1411,39 +1476,32 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *
*/
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
- struct mem_cgroup *iter;
- unsigned int i;
+ char *buf;
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
K((u64)memcg->memory.max), memcg->memory.failcnt);
- pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
- K((u64)page_counter_read(&memcg->memsw)),
- K((u64)memcg->memsw.max), memcg->memsw.failcnt);
- pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
- K((u64)page_counter_read(&memcg->kmem)),
- K((u64)memcg->kmem.max), memcg->kmem.failcnt);
-
- for_each_mem_cgroup_tree(iter, memcg) {
- pr_info("Memory cgroup stats for ");
- pr_cont_cgroup_path(iter->css.cgroup);
- pr_cont(":");
-
- for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
- if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
- continue;
- pr_cont(" %s:%luKB", memcg1_stat_names[i],
- K(memcg_page_state_local(iter,
- memcg1_stats[i])));
- }
-
- for (i = 0; i < NR_LRU_LISTS; i++)
- pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
- K(memcg_page_state_local(iter,
- NR_LRU_BASE + i)));
-
- pr_cont("\n");
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
+ K((u64)page_counter_read(&memcg->swap)),
+ K((u64)memcg->swap.max), memcg->swap.failcnt);
+ else {
+ pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
+ K((u64)page_counter_read(&memcg->memsw)),
+ K((u64)memcg->memsw.max), memcg->memsw.failcnt);
+ pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
+ K((u64)page_counter_read(&memcg->kmem)),
+ K((u64)memcg->kmem.max), memcg->kmem.failcnt);
}
+
+ pr_info("Memory cgroup stats for ");
+ pr_cont_cgroup_path(memcg->css.cgroup);
+ pr_cont(":");
+ buf = memory_stat_format(memcg);
+ if (!buf)
+ return;
+ pr_info("%s", buf);
+ kfree(buf);
}
/*
@@ -2279,7 +2337,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned long nr_reclaimed;
bool may_swap = true;
bool drained = false;
- bool oomed = false;
enum oom_status oom_status;
if (mem_cgroup_is_root(memcg))
@@ -2366,7 +2423,7 @@ retry:
if (nr_retries--)
goto retry;
- if (gfp_mask & __GFP_RETRY_MAYFAIL && oomed)
+ if (gfp_mask & __GFP_RETRY_MAYFAIL)
goto nomem;
if (gfp_mask & __GFP_NOFAIL)
@@ -2385,7 +2442,6 @@ retry:
switch (oom_status) {
case OOM_SUCCESS:
nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
- oomed = true;
goto retry;
case OOM_FAILED:
goto force;
@@ -2588,12 +2644,13 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
{
struct memcg_kmem_cache_create_work *cw;
+ if (!css_tryget_online(&memcg->css))
+ return;
+
cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
if (!cw)
return;
- css_get(&memcg->css);
-
cw->memcg = memcg;
cw->cachep = cachep;
INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
@@ -2628,6 +2685,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
{
struct mem_cgroup *memcg;
struct kmem_cache *memcg_cachep;
+ struct memcg_cache_array *arr;
int kmemcg_id;
VM_BUG_ON(!is_root_cache(cachep));
@@ -2635,14 +2693,28 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
if (memcg_kmem_bypass())
return cachep;
- memcg = get_mem_cgroup_from_current();
+ rcu_read_lock();
+
+ if (unlikely(current->active_memcg))
+ memcg = current->active_memcg;
+ else
+ memcg = mem_cgroup_from_task(current);
+
+ if (!memcg || memcg == root_mem_cgroup)
+ goto out_unlock;
+
kmemcg_id = READ_ONCE(memcg->kmemcg_id);
if (kmemcg_id < 0)
- goto out;
+ goto out_unlock;
- memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
- if (likely(memcg_cachep))
- return memcg_cachep;
+ arr = rcu_dereference(cachep->memcg_params.memcg_caches);
+
+ /*
+ * Make sure we will access the up-to-date value. The code updating
+ * memcg_caches issues a write barrier to match the data dependency
+ * barrier inside READ_ONCE() (see memcg_create_kmem_cache()).
+ */
+ memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]);
/*
* If we are in a safe context (can wait, and not in interrupt
@@ -2655,10 +2727,20 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
* memcg_create_kmem_cache, this means no further allocation
* could happen with the slab_mutex held. So it's better to
* defer everything.
+ *
+ * If the memcg is dying or memcg_cache is about to be released,
+ * don't bother creating new kmem_caches. Because memcg_cachep
+ * is ZEROed as the first step of kmem offlining, we don't need
+ * percpu_ref_tryget_live() here. The css_tryget_online() check in
+ * memcg_schedule_kmem_cache_create() will prevent us from
+ * creating a new kmem_cache.
*/
- memcg_schedule_kmem_cache_create(memcg, cachep);
-out:
- css_put(&memcg->css);
+ if (unlikely(!memcg_cachep))
+ memcg_schedule_kmem_cache_create(memcg, cachep);
+ else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
+ cachep = memcg_cachep;
+out_unlock:
+ rcu_read_unlock();
return cachep;
}
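A minimal sketch (not part of the patch) of the lookup pattern the rewritten memcg_kmem_get_cache() follows: dereference under RCU, then take a reference with percpu_ref_tryget() before handing the object out. The structure and helper names are illustrative.

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

struct example_obj {
	struct percpu_ref refcnt;
};

static struct example_obj *example_borrow(struct example_obj __rcu **slot)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(*slot);
	/* Only hand the object out if a reference can still be taken. */
	if (obj && !percpu_ref_tryget(&obj->refcnt))
		obj = NULL;
	rcu_read_unlock();

	return obj;	/* caller pairs with percpu_ref_put(), cf. memcg_kmem_put_cache() */
}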
@@ -2669,7 +2751,7 @@ out:
void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
if (!is_root_cache(cachep))
- css_put(&cachep->memcg_params.memcg->css);
+ percpu_ref_put(&cachep->memcg_params.refcnt);
}
/**
@@ -2697,9 +2779,6 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
cancel_charge(memcg, nr_pages);
return -ENOMEM;
}
-
- page->mem_cgroup = memcg;
-
return 0;
}
@@ -2722,12 +2801,30 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
memcg = get_mem_cgroup_from_current();
if (!mem_cgroup_is_root(memcg)) {
ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
- if (!ret)
+ if (!ret) {
+ page->mem_cgroup = memcg;
__SetPageKmemcg(page);
+ }
}
css_put(&memcg->css);
return ret;
}
+
+/**
+ * __memcg_kmem_uncharge_memcg: uncharge a kmem page
+ * @memcg: memcg to uncharge
+ * @nr_pages: number of pages to uncharge
+ */
+void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
+{
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ page_counter_uncharge(&memcg->kmem, nr_pages);
+
+ page_counter_uncharge(&memcg->memory, nr_pages);
+ if (do_memsw_account())
+ page_counter_uncharge(&memcg->memsw, nr_pages);
+}
/**
* __memcg_kmem_uncharge: uncharge a kmem page
* @page: page to uncharge
@@ -2742,14 +2839,7 @@ void __memcg_kmem_uncharge(struct page *page, int order)
return;
VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
-
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- page_counter_uncharge(&memcg->kmem, nr_pages);
-
- page_counter_uncharge(&memcg->memory, nr_pages);
- if (do_memsw_account())
- page_counter_uncharge(&memcg->memsw, nr_pages);
-
+ __memcg_kmem_uncharge_memcg(memcg, nr_pages);
page->mem_cgroup = NULL;
/* slab pages do not have PageKmemcg flag set */
@@ -3168,15 +3258,15 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
*/
memcg->kmem_state = KMEM_ALLOCATED;
- memcg_deactivate_kmem_caches(memcg);
-
- kmemcg_id = memcg->kmemcg_id;
- BUG_ON(kmemcg_id < 0);
-
parent = parent_mem_cgroup(memcg);
if (!parent)
parent = root_mem_cgroup;
+ memcg_deactivate_kmem_caches(memcg, parent);
+
+ kmemcg_id = memcg->kmemcg_id;
+ BUG_ON(kmemcg_id < 0);
+
/*
* Change kmemcg_id of this cgroup and all its descendants to the
* parent's id, and then move all entries from this cgroup's list_lrus
@@ -3207,9 +3297,8 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
memcg_offline_kmem(memcg);
if (memcg->kmem_state == KMEM_ALLOCATED) {
- memcg_destroy_kmem_caches(memcg);
+ WARN_ON(!list_empty(&memcg->kmem_caches));
static_branch_dec(&memcg_kmem_enabled_key);
- WARN_ON(page_counter_read(&memcg->kmem));
}
}
#else
@@ -3472,6 +3561,28 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
}
#endif /* CONFIG_NUMA */
+static const unsigned int memcg1_stats[] = {
+ MEMCG_CACHE,
+ MEMCG_RSS,
+ MEMCG_RSS_HUGE,
+ NR_SHMEM,
+ NR_FILE_MAPPED,
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ MEMCG_SWAP,
+};
+
+static const char *const memcg1_stat_names[] = {
+ "cache",
+ "rss",
+ "rss_huge",
+ "shmem",
+ "mapped_file",
+ "dirty",
+ "writeback",
+ "swap",
+};
+
/* Universal VM events cgroup1 shows, original sort order */
static const unsigned int memcg1_events[] = {
PGPGIN,
@@ -3530,12 +3641,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
- (u64)memcg_page_state(memcg, i) * PAGE_SIZE);
+ (u64)memcg_page_state(memcg, memcg1_stats[i]) *
+ PAGE_SIZE);
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
- (u64)memcg_events(memcg, i));
+ (u64)memcg_events(memcg, memcg1_events[i]));
for (i = 0; i < NR_LRU_LISTS; i++)
seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
@@ -4634,6 +4746,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
/* The following stuff does not apply to the root */
if (!parent) {
+#ifdef CONFIG_MEMCG_KMEM
+ INIT_LIST_HEAD(&memcg->kmem_caches);
+#endif
root_mem_cgroup = memcg;
return &memcg->css;
}
@@ -5625,112 +5740,42 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
return nbytes;
}
+static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
+{
+ seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
+ seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
+ seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
+ seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
+ seq_printf(m, "oom_kill %lu\n",
+ atomic_long_read(&events[MEMCG_OOM_KILL]));
+}
+
static int memory_events_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- seq_printf(m, "low %lu\n",
- atomic_long_read(&memcg->memory_events[MEMCG_LOW]));
- seq_printf(m, "high %lu\n",
- atomic_long_read(&memcg->memory_events[MEMCG_HIGH]));
- seq_printf(m, "max %lu\n",
- atomic_long_read(&memcg->memory_events[MEMCG_MAX]));
- seq_printf(m, "oom %lu\n",
- atomic_long_read(&memcg->memory_events[MEMCG_OOM]));
- seq_printf(m, "oom_kill %lu\n",
- atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
-
+ __memory_events_show(m, memcg->memory_events);
return 0;
}
-static int memory_stat_show(struct seq_file *m, void *v)
+static int memory_events_local_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
- int i;
-
- /*
- * Provide statistics on the state of the memory subsystem as
- * well as cumulative event counters that show past behavior.
- *
- * This list is ordered following a combination of these gradients:
- * 1) generic big picture -> specifics and details
- * 2) reflecting userspace activity -> reflecting kernel heuristics
- *
- * Current memory state:
- */
-
- seq_printf(m, "anon %llu\n",
- (u64)memcg_page_state(memcg, MEMCG_RSS) * PAGE_SIZE);
- seq_printf(m, "file %llu\n",
- (u64)memcg_page_state(memcg, MEMCG_CACHE) * PAGE_SIZE);
- seq_printf(m, "kernel_stack %llu\n",
- (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1024);
- seq_printf(m, "slab %llu\n",
- (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
- memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
- PAGE_SIZE);
- seq_printf(m, "sock %llu\n",
- (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE);
-
- seq_printf(m, "shmem %llu\n",
- (u64)memcg_page_state(memcg, NR_SHMEM) * PAGE_SIZE);
- seq_printf(m, "file_mapped %llu\n",
- (u64)memcg_page_state(memcg, NR_FILE_MAPPED) * PAGE_SIZE);
- seq_printf(m, "file_dirty %llu\n",
- (u64)memcg_page_state(memcg, NR_FILE_DIRTY) * PAGE_SIZE);
- seq_printf(m, "file_writeback %llu\n",
- (u64)memcg_page_state(memcg, NR_WRITEBACK) * PAGE_SIZE);
- /*
- * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
- * with the NR_ANON_THP vm counter, but right now it's a pain in the
- * arse because it requires migrating the work out of rmap to a place
- * where the page->mem_cgroup is set up and stable.
- */
- seq_printf(m, "anon_thp %llu\n",
- (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) * PAGE_SIZE);
-
- for (i = 0; i < NR_LRU_LISTS; i++)
- seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i],
- (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
- PAGE_SIZE);
-
- seq_printf(m, "slab_reclaimable %llu\n",
- (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
- PAGE_SIZE);
- seq_printf(m, "slab_unreclaimable %llu\n",
- (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
- PAGE_SIZE);
-
- /* Accumulated memory events */
-
- seq_printf(m, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
- seq_printf(m, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));
-
- seq_printf(m, "workingset_refault %lu\n",
- memcg_page_state(memcg, WORKINGSET_REFAULT));
- seq_printf(m, "workingset_activate %lu\n",
- memcg_page_state(memcg, WORKINGSET_ACTIVATE));
- seq_printf(m, "workingset_nodereclaim %lu\n",
- memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
-
- seq_printf(m, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
- seq_printf(m, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) +
- memcg_events(memcg, PGSCAN_DIRECT));
- seq_printf(m, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) +
- memcg_events(memcg, PGSTEAL_DIRECT));
- seq_printf(m, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
- seq_printf(m, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
- seq_printf(m, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
- seq_printf(m, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));
+ __memory_events_show(m, memcg->memory_events_local);
+ return 0;
+}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- seq_printf(m, "thp_fault_alloc %lu\n",
- memcg_events(memcg, THP_FAULT_ALLOC));
- seq_printf(m, "thp_collapse_alloc %lu\n",
- memcg_events(memcg, THP_COLLAPSE_ALLOC));
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static int memory_stat_show(struct seq_file *m, void *v)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
+ char *buf;
+ buf = memory_stat_format(memcg);
+ if (!buf)
+ return -ENOMEM;
+ seq_puts(m, buf);
+ kfree(buf);
return 0;
}
@@ -5802,6 +5847,12 @@ static struct cftype memory_files[] = {
.seq_show = memory_events_show,
},
{
+ .name = "events.local",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .file_offset = offsetof(struct mem_cgroup, events_local_file),
+ .seq_show = memory_events_local_show,
+ },
+ {
.name = "stat",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = memory_stat_show,
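For illustration only (values made up): the new memory.events.local file exposes the same five fields as memory.events, but backed by the memory_events_local counters of the cgroup itself rather than the hierarchically aggregated ones:

	low 0
	high 12
	max 3
	oom 0
	oom_kill 0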
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index f045514d8d20..7e08cbf3ba49 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -213,7 +213,7 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
short addr_lsb = tk->size_shift;
int ret;
- pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
+ pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
pfn, t->comm, t->pid);
if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd0c317..53bd59579861 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1475,8 +1475,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
set_pte_at(mm, addr, pte, mk_pte(page, prot));
retval = 0;
- pte_unmap_unlock(pte, ptl);
- return retval;
out_unlock:
pte_unmap_unlock(pte, ptl);
out:
@@ -1547,7 +1545,7 @@ static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
int ret, i;
/* Fail if the user requested offset is beyond the end of the object */
- if (offset > num)
+ if (offset >= num)
return -ENXIO;
/* Fail if the user requested size exceeds available object size */
@@ -2038,7 +2036,6 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
{
pte_t *pte;
int err;
- pgtable_t token;
spinlock_t *uninitialized_var(ptl);
pte = (mm == &init_mm) ?
@@ -2051,10 +2048,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
arch_enter_lazy_mmu_mode();
- token = pmd_pgtable(*pmd);
-
do {
- err = fn(pte++, token, addr, data);
+ err = fn(pte++, addr, data);
if (err)
break;
} while (addr += PAGE_SIZE, addr != end);
@@ -2807,7 +2802,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
struct swap_info_struct *si = swp_swap_info(entry);
if (si->flags & SWP_SYNCHRONOUS_IO &&
- __swap_count(si, entry) == 1) {
+ __swap_count(entry) == 1) {
/* skip swapcache */
page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vmf->address);
@@ -4349,7 +4344,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
void *old_buf = buf;
int write = gup_flags & FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem))
+ return 0;
+
/* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, ret, offset;
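A minimal sketch (not part of the patch): a callback written against the new three-argument form used by the fn(pte++, addr, data) call above, now that the unused pgtable_t token is gone. The counting callback and wrapper are illustrative.

#include <linux/mm.h>

static int example_pte_cb(pte_t *pte, unsigned long addr, void *data)
{
	unsigned int *visited = data;

	(*visited)++;		/* just count the PTE slots we were handed */
	return 0;		/* a non-zero return aborts the walk */
}

static int example_walk(struct mm_struct *mm, unsigned long addr,
			unsigned long size)
{
	unsigned int visited = 0;

	return apply_to_page_range(mm, addr, size, example_pte_cb, &visited);
}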
diff --git a/mm/mincore.c b/mm/mincore.c
index c3f058bd0faf..4fe91d497436 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -68,8 +68,16 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
*/
if (xa_is_value(page)) {
swp_entry_t swp = radix_to_swp_entry(page);
- page = find_get_page(swap_address_space(swp),
- swp_offset(swp));
+ struct swap_info_struct *si;
+
+ /* Prevent the swap device from being swapped off under us */
+ si = get_swap_device(swp);
+ if (si) {
+ page = find_get_page(swap_address_space(swp),
+ swp_offset(swp));
+ put_swap_device(si);
+ } else
+ page = NULL;
}
} else
page = find_get_page(mapping, pgoff);
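A minimal sketch (not part of the patch) of the get_swap_device()/put_swap_device() bracket used above, applied to a generic swap-cache lookup; the helper name is illustrative.

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

static struct page *example_lookup_swap_cache(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct page *page = NULL;

	si = get_swap_device(entry);	/* pins the device against swapoff */
	if (si) {
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
	}

	return page;
}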
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 513b9607409d..b5670620aea0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -274,7 +274,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
* thanks to mm_take_all_locks().
*/
spin_lock(&mm->mmu_notifier_mm->lock);
- hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+ hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
spin_unlock(&mm->mmu_notifier_mm->lock);
mm_drop_all_locks(mm);
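A minimal sketch (not part of the patch) of the reader side that hlist_add_head_rcu() publication pairs with: lockless traversal under RCU. The notifier structure and callback are illustrative; the mmu notifier code itself walks its list under SRCU.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_notifier {
	struct hlist_node hlist;
	void (*fn)(struct example_notifier *n);
};

static void example_call_notifiers(struct hlist_head *head)
{
	struct example_notifier *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, head, hlist)
		n->fn(n);
	rcu_read_unlock();
}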
diff --git a/mm/nommu.c b/mm/nommu.c
index d8c02fbe03b5..eb3e2e558da1 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -111,94 +111,6 @@ unsigned int kobjsize(const void *objp)
return PAGE_SIZE << compound_order(page);
}
-static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking)
-{
- struct vm_area_struct *vma;
- unsigned long vm_flags;
- int i;
-
- /* calculate required read or write permissions.
- * If FOLL_FORCE is set, we only require the "MAY" flags.
- */
- vm_flags = (foll_flags & FOLL_WRITE) ?
- (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
- vm_flags &= (foll_flags & FOLL_FORCE) ?
- (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-
- for (i = 0; i < nr_pages; i++) {
- vma = find_vma(mm, start);
- if (!vma)
- goto finish_or_fault;
-
- /* protect what we can, including chardevs */
- if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- !(vm_flags & vma->vm_flags))
- goto finish_or_fault;
-
- if (pages) {
- pages[i] = virt_to_page(start);
- if (pages[i])
- get_page(pages[i]);
- }
- if (vmas)
- vmas[i] = vma;
- start = (start + PAGE_SIZE) & PAGE_MASK;
- }
-
- return i;
-
-finish_or_fault:
- return i ? : -EFAULT;
-}
-
-/*
- * get a list of pages in an address range belonging to the specified process
- * and indicate the VMA that covers each page
- * - this is potentially dodgy as we may end incrementing the page count of a
- * slab page or a secondary page from a compound page
- * - don't permit access to VMAs that don't support it, such as I/O mappings
- */
-long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas)
-{
- return __get_user_pages(current, current->mm, start, nr_pages,
- gup_flags, pages, vmas, NULL);
-}
-EXPORT_SYMBOL(get_user_pages);
-
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- int *locked)
-{
- return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-static long __get_user_pages_unlocked(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long start,
- unsigned long nr_pages, struct page **pages,
- unsigned int gup_flags)
-{
- long ret;
- down_read(&mm->mmap_sem);
- ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
- NULL, NULL);
- up_read(&mm->mmap_sem);
- return ret;
-}
-
-long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
-{
- return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- pages, gup_flags);
-}
-EXPORT_SYMBOL(get_user_pages_unlocked);
-
/**
* follow_pfn - look up PFN at a user virtual address
* @vma: memory mapping
@@ -1792,7 +1704,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct *vma;
int write = gup_flags & FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem))
+ return 0;
/* the access must start within one of the target process's mappings */
vma = find_vma(mm, addr);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f719b64741d6..eda2e2a0bdc6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -64,21 +64,33 @@ int sysctl_oom_dump_tasks = 1;
*/
DEFINE_MUTEX(oom_lock);
+static inline bool is_memcg_oom(struct oom_control *oc)
+{
+ return oc->memcg != NULL;
+}
+
#ifdef CONFIG_NUMA
/**
- * has_intersects_mems_allowed() - check task eligiblity for kill
+ * oom_cpuset_eligible() - check task eligibility for kill
* @start: task struct of which task to consider
* @mask: nodemask passed to page allocator for mempolicy ooms
*
* Task eligibility is determined by whether or not a candidate task, @tsk,
* shares the same mempolicy nodes as current if it is bound by such a policy
* and whether or not it has the same set of allowed cpuset nodes.
+ *
+ * This function assumes oom-killer context and that 'current' has triggered
+ * the oom-killer.
*/
-static bool has_intersects_mems_allowed(struct task_struct *start,
- const nodemask_t *mask)
+static bool oom_cpuset_eligible(struct task_struct *start,
+ struct oom_control *oc)
{
struct task_struct *tsk;
bool ret = false;
+ const nodemask_t *mask = oc->nodemask;
+
+ if (is_memcg_oom(oc))
+ return true;
rcu_read_lock();
for_each_thread(start, tsk) {
@@ -105,8 +117,7 @@ static bool has_intersects_mems_allowed(struct task_struct *start,
return ret;
}
#else
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
- const nodemask_t *mask)
+static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
return true;
}
@@ -146,28 +157,13 @@ static inline bool is_sysrq_oom(struct oom_control *oc)
return oc->order == -1;
}
-static inline bool is_memcg_oom(struct oom_control *oc)
-{
- return oc->memcg != NULL;
-}
-
/* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p,
- struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p)
{
if (is_global_init(p))
return true;
if (p->flags & PF_KTHREAD)
return true;
-
- /* When mem_cgroup_out_of_memory() and p is not member of the group */
- if (memcg && !task_in_mem_cgroup(p, memcg))
- return true;
-
- /* p may not have freeable memory in nodemask */
- if (!has_intersects_mems_allowed(p, nodemask))
- return true;
-
return false;
}
@@ -194,20 +190,17 @@ static bool is_dump_unreclaim_slabs(void)
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
* @totalpages: total present RAM allowed for page allocation
- * @memcg: task's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
*
* The heuristic for determining which task to kill is made to be as simple and
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
-unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, unsigned long totalpages)
+unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
{
long points;
long adj;
- if (oom_unkillable_task(p, memcg, nodemask))
+ if (oom_unkillable_task(p))
return 0;
p = find_lock_task_mm(p);
@@ -318,7 +311,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
struct oom_control *oc = arg;
unsigned long points;
- if (oom_unkillable_task(task, NULL, oc->nodemask))
+ if (oom_unkillable_task(task))
+ goto next;
+
+ /* p may not have freeable memory in nodemask */
+ if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
goto next;
/*
@@ -342,13 +339,10 @@ static int oom_evaluate_task(struct task_struct *task, void *arg)
goto select;
}
- points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
+ points = oom_badness(task, oc->totalpages);
if (!points || points < oc->chosen_points)
goto next;
- /* Prefer thread group leaders for display purposes */
- if (points == oc->chosen_points && thread_group_leader(oc->chosen))
- goto next;
select:
if (oc->chosen)
put_task_struct(oc->chosen);
@@ -381,14 +375,44 @@ static void select_bad_process(struct oom_control *oc)
break;
rcu_read_unlock();
}
+}
- oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
+static int dump_task(struct task_struct *p, void *arg)
+{
+ struct oom_control *oc = arg;
+ struct task_struct *task;
+
+ if (oom_unkillable_task(p))
+ return 0;
+
+ /* p may not have freeable memory in nodemask */
+ if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
+ return 0;
+
+ task = find_lock_task_mm(p);
+ if (!task) {
+ /*
+ * This is a kthread or all of p's threads have already
+ * detached their mm's. There's no need to report
+ * them; they can't be oom killed anyway.
+ */
+ return 0;
+ }
+
+ pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
+ task->pid, from_kuid(&init_user_ns, task_uid(task)),
+ task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
+ mm_pgtables_bytes(task->mm),
+ get_mm_counter(task->mm, MM_SWAPENTS),
+ task->signal->oom_score_adj, task->comm);
+ task_unlock(task);
+
+ return 0;
}
/**
* dump_tasks - dump current memory state of all system tasks
- * @memcg: current's memory controller, if constrained
- * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ * @oc: pointer to struct oom_control
*
* Dumps the current memory state of all eligible tasks. Tasks not in the same
* memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
@@ -396,37 +420,21 @@ static void select_bad_process(struct oom_control *oc)
* State information includes task's pid, uid, tgid, vm size, rss,
* pgtables_bytes, swapents, oom_score_adj value, and name.
*/
-static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_tasks(struct oom_control *oc)
{
- struct task_struct *p;
- struct task_struct *task;
-
pr_info("Tasks state (memory values in pages):\n");
pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
- rcu_read_lock();
- for_each_process(p) {
- if (oom_unkillable_task(p, memcg, nodemask))
- continue;
- task = find_lock_task_mm(p);
- if (!task) {
- /*
- * This is a kthread or all of p's threads have already
- * detached their mm's. There's no need to report
- * them; they can't be oom killed anyway.
- */
- continue;
- }
+ if (is_memcg_oom(oc))
+ mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
+ else {
+ struct task_struct *p;
- pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
- task->pid, from_kuid(&init_user_ns, task_uid(task)),
- task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- mm_pgtables_bytes(task->mm),
- get_mm_counter(task->mm, MM_SWAPENTS),
- task->signal->oom_score_adj, task->comm);
- task_unlock(task);
+ rcu_read_lock();
+ for_each_process(p)
+ dump_task(p, oc);
+ rcu_read_unlock();
}
- rcu_read_unlock();
}
static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
@@ -458,7 +466,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
dump_unreclaimable_slab();
}
if (sysctl_oom_dump_tasks)
- dump_tasks(oc->memcg, oc->nodemask);
+ dump_tasks(oc);
if (p)
dump_oom_summary(oc, p);
}
@@ -1075,7 +1083,8 @@ bool out_of_memory(struct oom_control *oc)
check_panic_on_oom(oc);
if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
- current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
+ current->mm && !oom_unkillable_task(current) &&
+ oom_cpuset_eligible(current, oc) &&
current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
get_task_struct(current);
oc->chosen = current;
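A minimal sketch (not part of the patch) of the mem_cgroup_scan_tasks() callback shape that the new dump_task() plugs into; after the change above the iterator visits thread-group leaders only (CSS_TASK_ITER_PROCS). The counting callback is an illustrative stand-in.

#include <linux/memcontrol.h>
#include <linux/sched.h>

static int example_count_task(struct task_struct *task, void *arg)
{
	unsigned int *nr = arg;

	(*nr)++;
	return 0;	/* a non-zero return stops the iteration */
}

static unsigned int example_count_memcg_tasks(struct mem_cgroup *memcg)
{
	unsigned int nr = 0;

	mem_cgroup_scan_tasks(memcg, example_count_task, &nr);
	return nr;
}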
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bdbe8b6b1225..1804f64ff43c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2429,7 +2429,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
this_cpu_inc(bdp_ratelimits);
}
}
-EXPORT_SYMBOL(account_page_dirtied);
/*
* Helper function for deaccounting dirty page without writeback.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e3bc949ebcc..dbd0d5cbbcbb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -50,7 +50,6 @@
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
-#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
@@ -136,6 +135,55 @@ unsigned long totalcma_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
+#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
+DEFINE_STATIC_KEY_TRUE(init_on_alloc);
+#else
+DEFINE_STATIC_KEY_FALSE(init_on_alloc);
+#endif
+EXPORT_SYMBOL(init_on_alloc);
+
+#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
+DEFINE_STATIC_KEY_TRUE(init_on_free);
+#else
+DEFINE_STATIC_KEY_FALSE(init_on_free);
+#endif
+EXPORT_SYMBOL(init_on_free);
+
+static int __init early_init_on_alloc(char *buf)
+{
+ int ret;
+ bool bool_result;
+
+ if (!buf)
+ return -EINVAL;
+ ret = kstrtobool(buf, &bool_result);
+ if (bool_result && page_poisoning_enabled())
+ pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_alloc\n");
+ if (bool_result)
+ static_branch_enable(&init_on_alloc);
+ else
+ static_branch_disable(&init_on_alloc);
+ return ret;
+}
+early_param("init_on_alloc", early_init_on_alloc);
+
+static int __init early_init_on_free(char *buf)
+{
+ int ret;
+ bool bool_result;
+
+ if (!buf)
+ return -EINVAL;
+ ret = kstrtobool(buf, &bool_result);
+ if (bool_result && page_poisoning_enabled())
+ pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_free\n");
+ if (bool_result)
+ static_branch_enable(&init_on_free);
+ else
+ static_branch_disable(&init_on_free);
+ return ret;
+}
+early_param("init_on_free", early_init_on_free);
/*
* A cached value of the page's pageblock's migratetype, used when the page is
@@ -224,8 +272,6 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
[ZONE_MOVABLE] = 0,
};
-EXPORT_SYMBOL(totalram_pages);
-
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
"DMA",
@@ -646,30 +692,29 @@ void prep_compound_page(struct page *page, unsigned int order)
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
-bool _debug_pagealloc_enabled __read_mostly
- = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
+DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#else
+DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+#endif
EXPORT_SYMBOL(_debug_pagealloc_enabled);
-bool _debug_guardpage_enabled __read_mostly;
+
+DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
static int __init early_debug_pagealloc(char *buf)
{
- if (!buf)
+ bool enable = false;
+
+ if (kstrtobool(buf, &enable))
return -EINVAL;
- return kstrtobool(buf, &_debug_pagealloc_enabled);
-}
-early_param("debug_pagealloc", early_debug_pagealloc);
-static bool need_debug_guardpage(void)
-{
- /* If we don't use debug_pagealloc, we don't need guard page */
- if (!debug_pagealloc_enabled())
- return false;
+ if (enable)
+ static_branch_enable(&_debug_pagealloc_enabled);
- if (!debug_guardpage_minorder())
- return false;
-
- return true;
+ return 0;
}
+early_param("debug_pagealloc", early_debug_pagealloc);
static void init_debug_guardpage(void)
{
@@ -679,14 +724,9 @@ static void init_debug_guardpage(void)
if (!debug_guardpage_minorder())
return;
- _debug_guardpage_enabled = true;
+ static_branch_enable(&_debug_guardpage_enabled);
}
-struct page_ext_operations debug_guardpage_ops = {
- .need = need_debug_guardpage,
- .init = init_debug_guardpage,
-};
-
static int __init debug_guardpage_minorder_setup(char *buf)
{
unsigned long res;
@@ -704,20 +744,13 @@ early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype)
{
- struct page_ext *page_ext;
-
if (!debug_guardpage_enabled())
return false;
if (order >= debug_guardpage_minorder())
return false;
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return false;
-
- __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
-
+ __SetPageGuard(page);
INIT_LIST_HEAD(&page->lru);
set_page_private(page, order);
/* Guard pages are not available for any usage */
@@ -729,23 +762,16 @@ static inline bool set_page_guard(struct zone *zone, struct page *page,
static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype)
{
- struct page_ext *page_ext;
-
if (!debug_guardpage_enabled())
return;
- page_ext = lookup_page_ext(page);
- if (unlikely(!page_ext))
- return;
-
- __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+ __ClearPageGuard(page);
set_page_private(page, 0);
if (!is_migrate_isolate(migratetype))
__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
-struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -1090,6 +1116,14 @@ out:
return ret;
}
+static void kernel_init_free_pages(struct page *page, int numpages)
+{
+ int i;
+
+ for (i = 0; i < numpages; i++)
+ clear_highpage(page + i);
+}
+
static __always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free)
{
@@ -1141,6 +1175,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
PAGE_SIZE << order);
}
arch_free_page(page, order);
+ if (want_init_on_free())
+ kernel_init_free_pages(page, 1 << order);
+
kernel_poison_pages(page, 1 << order, 0);
if (debug_pagealloc_enabled())
kernel_map_pages(page, 1 << order, 0);
@@ -1151,19 +1188,36 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
#ifdef CONFIG_DEBUG_VM
-static inline bool free_pcp_prepare(struct page *page)
+/*
+ * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
+ * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
+ * moved from pcp lists to free lists.
+ */
+static bool free_pcp_prepare(struct page *page)
{
return free_pages_prepare(page, 0, true);
}
-static inline bool bulkfree_pcp_prepare(struct page *page)
+static bool bulkfree_pcp_prepare(struct page *page)
{
- return false;
+ if (debug_pagealloc_enabled())
+ return free_pages_check(page);
+ else
+ return false;
}
#else
+/*
+ * With DEBUG_VM disabled, order-0 pages being freed are checked only when
+ * moving from pcp lists to free list in order to reduce overhead. With
+ * debug_pagealloc enabled, they are checked also immediately when being freed
+ * to the pcp lists.
+ */
static bool free_pcp_prepare(struct page *page)
{
- return free_pages_prepare(page, 0, false);
+ if (debug_pagealloc_enabled())
+ return free_pages_prepare(page, 0, true);
+ else
+ return free_pages_prepare(page, 0, false);
}
static bool bulkfree_pcp_prepare(struct page *page)
@@ -1904,6 +1958,10 @@ void __init page_alloc_init_late(void)
for_each_populated_zone(zone)
set_zone_contiguous(zone);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ init_debug_guardpage();
+#endif
}
#ifdef CONFIG_CMA
@@ -2021,28 +2079,44 @@ static inline int check_new_page(struct page *page)
static inline bool free_pages_prezeroed(void)
{
- return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
- page_poisoning_enabled();
+ return (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
+ page_poisoning_enabled()) || want_init_on_free();
}
#ifdef CONFIG_DEBUG_VM
-static bool check_pcp_refill(struct page *page)
+/*
+ * With DEBUG_VM enabled, order-0 pages are checked for expected state when
+ * being allocated from pcp lists. With debug_pagealloc also enabled, they are
+ * also checked when pcp lists are refilled from the free lists.
+ */
+static inline bool check_pcp_refill(struct page *page)
{
- return false;
+ if (debug_pagealloc_enabled())
+ return check_new_page(page);
+ else
+ return false;
}
-static bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page)
{
return check_new_page(page);
}
#else
-static bool check_pcp_refill(struct page *page)
+/*
+ * With DEBUG_VM disabled, free order-0 pages are checked for expected state
+ * when pcp lists are being refilled from the free lists. With debug_pagealloc
+ * enabled, they are also checked when being allocated from the pcp lists.
+ */
+static inline bool check_pcp_refill(struct page *page)
{
return check_new_page(page);
}
-static bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page)
{
- return false;
+ if (debug_pagealloc_enabled())
+ return check_new_page(page);
+ else
+ return false;
}
#endif /* CONFIG_DEBUG_VM */
@@ -2076,13 +2150,10 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
unsigned int alloc_flags)
{
- int i;
-
post_alloc_hook(page, order, gfp_flags);
- if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
- for (i = 0; i < (1 << order); i++)
- clear_highpage(page + i);
+ if (!free_pages_prezeroed() && want_init_on_alloc(gfp_flags))
+ kernel_init_free_pages(page, 1 << order);
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
@@ -7520,10 +7591,28 @@ static int page_alloc_cpu_dead(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_NUMA
+int hashdist = HASHDIST_DEFAULT;
+
+static int __init set_hashdist(char *str)
+{
+ if (!str)
+ return 0;
+ hashdist = simple_strtoul(str, &str, 0);
+ return 1;
+}
+__setup("hashdist=", set_hashdist);
+#endif
+
void __init page_alloc_init(void)
{
int ret;
+#ifdef CONFIG_NUMA
+ if (num_node_state(N_MEMORY) == 1)
+ hashdist = 0;
+#endif
+
ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
"mm/page_alloc:dead", NULL,
page_alloc_cpu_dead);
@@ -7908,19 +7997,6 @@ out:
return ret;
}
-#ifdef CONFIG_NUMA
-int hashdist = HASHDIST_DEFAULT;
-
-static int __init set_hashdist(char *str)
-{
- if (!str)
- return 0;
- hashdist = simple_strtoul(str, &str, 0);
- return 1;
-}
-__setup("hashdist=", set_hashdist);
-#endif
-
#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
* Returns the number of pages that arch has reserved but
@@ -7967,6 +8043,7 @@ void *__init alloc_large_system_hash(const char *tablename,
unsigned long log2qty, size;
void *table = NULL;
gfp_t gfp_flags;
+ bool virt;
/* allow the kernel cmdline to have a say */
if (!numentries) {
@@ -8023,6 +8100,7 @@ void *__init alloc_large_system_hash(const char *tablename,
gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
do {
+ virt = false;
size = bucketsize << log2qty;
if (flags & HASH_EARLY) {
if (flags & HASH_ZERO)
@@ -8030,26 +8108,26 @@ void *__init alloc_large_system_hash(const char *tablename,
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
- } else if (hashdist) {
+ } else if (get_order(size) >= MAX_ORDER || hashdist) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+ virt = true;
} else {
/*
* If bucketsize is not a power-of-two, we may free
* some pages at the end of hash table which
* alloc_pages_exact() automatically does
*/
- if (get_order(size) < MAX_ORDER) {
- table = alloc_pages_exact(size, gfp_flags);
- kmemleak_alloc(table, size, 1, gfp_flags);
- }
+ table = alloc_pages_exact(size, gfp_flags);
+ kmemleak_alloc(table, size, 1, gfp_flags);
}
} while (!table && size > PAGE_SIZE && --log2qty);
if (!table)
panic("Failed to allocate %s hash table\n", tablename);
- pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
- tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
+ pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
+ tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
+ virt ? "vmalloc" : "linear");
if (_hash_shift)
*_hash_shift = log2qty;
diff --git a/mm/page_ext.c b/mm/page_ext.c
index d8f1aca4ad43..5f5769c7db3b 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -59,9 +59,6 @@
*/
static struct page_ext_operations *page_ext_ops[] = {
-#ifdef CONFIG_DEBUG_PAGEALLOC
- &debug_guardpage_ops,
-#endif
#ifdef CONFIG_PAGE_OWNER
&page_owner_ops,
#endif
diff --git a/mm/page_io.c b/mm/page_io.c
index a39aac2f8c8d..24ee600f9131 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -163,7 +163,7 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
blocks_per_page = PAGE_SIZE >> blkbits;
/*
- * Map all the blocks into the extent list. This code doesn't try
+ * Map all the blocks into the extent tree. This code doesn't try
* to be very smart.
*/
probe_block = 0;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index e3638a5bafff..89c19c0feadb 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -230,7 +230,7 @@ undo:
/*
* Make isolated pages available again.
*/
-int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype)
{
unsigned long pfn;
@@ -247,7 +247,6 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
continue;
unset_migratetype_isolate(page, migratetype);
}
- return 0;
}
/*
* Test whether all pages in the range are free (i.e. isolated).
diff --git a/mm/slab.c b/mm/slab.c
index f7117ad9b3a3..9df370558e5d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -371,12 +371,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
- struct page *page = virt_to_head_page(obj);
- return page->slab_cache;
-}
-
static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
unsigned int idx)
{
@@ -1245,7 +1239,7 @@ void __init kmem_cache_init(void)
nr_node_ids * sizeof(struct kmem_cache_node *),
SLAB_HWCACHE_ALIGN, 0, 0);
list_add(&kmem_cache->list, &slab_caches);
- memcg_link_cache(kmem_cache);
+ memcg_link_cache(kmem_cache, NULL);
slab_state = PARTIAL;
/*
@@ -1366,7 +1360,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct page *page;
- int nr_pages;
flags |= cachep->allocflags;
@@ -1376,17 +1369,11 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
return NULL;
}
- if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
+ if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
__free_pages(page, cachep->gfporder);
return NULL;
}
- nr_pages = (1 << cachep->gfporder);
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
- else
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
-
__SetPageSlab(page);
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1401,12 +1388,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
int order = cachep->gfporder;
- unsigned long nr_freed = (1 << order);
-
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
- else
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
BUG_ON(!PageSlab(page));
__ClearPageSlabPfmemalloc(page);
@@ -1415,8 +1396,8 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
page->mapping = NULL;
if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab += nr_freed;
- memcg_uncharge_slab(page, order, cachep);
+ current->reclaim_state->reclaimed_slab += 1 << order;
+ uncharge_slab_page(page, order, cachep);
__free_pages(page, order);
}
@@ -1830,6 +1811,14 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
cachep->num = 0;
+ /*
+ * If slab auto-initialization on free is enabled, store the freelist
+ * off-slab, so that its contents don't end up in one of the allocated
+ * objects.
+ */
+ if (unlikely(slab_want_init_on_free(cachep)))
+ return false;
+
if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
return false;
@@ -2258,6 +2247,10 @@ void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
{
__kmem_cache_shrink(cachep);
}
+
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
+{
+}
#endif
int __kmem_cache_shutdown(struct kmem_cache *cachep)
@@ -3263,7 +3256,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
- if (unlikely(flags & __GFP_ZERO) && ptr)
+ if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
memset(ptr, 0, cachep->object_size);
slab_post_alloc_hook(cachep, flags, 1, &ptr);
@@ -3320,7 +3313,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
- if (unlikely(flags & __GFP_ZERO) && objp)
+ if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
memset(objp, 0, cachep->object_size);
slab_post_alloc_hook(cachep, flags, 1, &objp);
@@ -3441,6 +3434,8 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
+ if (unlikely(slab_want_init_on_free(cachep)))
+ memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
@@ -3528,7 +3523,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
/* Clear memory outside IRQ disabled section */
- if (unlikely(flags & __GFP_ZERO))
+ if (unlikely(slab_want_init_on_alloc(flags, s)))
for (i = 0; i < size; i++)
memset(p[i], 0, s->object_size);
@@ -3715,6 +3710,8 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
s = virt_to_cache(objp);
else
s = cache_from_obj(orig_s, objp);
+ if (!s)
+ continue;
debug_check_no_locks_freed(objp, s->object_size);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
@@ -3749,6 +3746,10 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
+ if (!c) {
+ local_irq_restore(flags);
+ return;
+ }
debug_check_no_locks_freed(objp, c->object_size);
debug_check_no_obj_freed(objp, c->object_size);
@@ -4204,33 +4205,23 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
#endif /* CONFIG_HARDENED_USERCOPY */
/**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
- *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
+ * __ksize -- Uninstrumented ksize.
*
- * Return: size of the actual memory used by @objp in bytes
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
*/
-size_t ksize(const void *objp)
+size_t __ksize(const void *objp)
{
+ struct kmem_cache *c;
size_t size;
BUG_ON(!objp);
if (unlikely(objp == ZERO_SIZE_PTR))
return 0;
- size = virt_to_cache(objp)->object_size;
- /* We assume that ksize callers could use the whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_shadow(objp, size);
+ c = virt_to_cache(objp);
+ size = c ? c->object_size : 0;
return size;
}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
diff --git a/mm/slab.h b/mm/slab.h
index 43ac818b8592..9057b8056b07 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -172,6 +172,7 @@ int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
@@ -204,6 +205,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+static inline int cache_vmstat_idx(struct kmem_cache *s)
+{
+ return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
+}
+
#ifdef CONFIG_MEMCG_KMEM
/* List of all root caches. */
@@ -241,31 +248,6 @@ static inline const char *cache_name(struct kmem_cache *s)
return s->name;
}
-/*
- * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
- * That said the caller must assure the memcg's cache won't go away by either
- * taking a css reference to the owner cgroup, or holding the slab_mutex.
- */
-static inline struct kmem_cache *
-cache_from_memcg_idx(struct kmem_cache *s, int idx)
-{
- struct kmem_cache *cachep;
- struct memcg_cache_array *arr;
-
- rcu_read_lock();
- arr = rcu_dereference(s->memcg_params.memcg_caches);
-
- /*
- * Make sure we will access the up-to-date value. The code updating
- * memcg_caches issues a write barrier to match this (see
- * memcg_create_kmem_cache()).
- */
- cachep = READ_ONCE(arr->entries[idx]);
- rcu_read_unlock();
-
- return cachep;
-}
-
static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
if (is_root_cache(s))
@@ -273,25 +255,94 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
return s->memcg_params.root_cache;
}
+/*
+ * Expects a pointer to a slab page. Please note, that PageSlab() check
+ * isn't sufficient, as it returns true also for tail compound slab pages,
+ * which do not have slab_cache pointer set.
+ * So this function assumes that the page can pass PageHead() and PageSlab()
+ * checks.
+ *
+ * The kmem_cache can be reparented asynchronously. The caller must ensure
+ * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
+ */
+static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
+{
+ struct kmem_cache *s;
+
+ s = READ_ONCE(page->slab_cache);
+ if (s && !is_root_cache(s))
+ return READ_ONCE(s->memcg_params.memcg);
+
+ return NULL;
+}
+
+/*
+ * Charge the slab page belonging to the non-root kmem_cache.
+ * Can be called for non-root kmem_caches only.
+ */
static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
- if (is_root_cache(s))
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+ int ret;
+
+ rcu_read_lock();
+ memcg = READ_ONCE(s->memcg_params.memcg);
+ while (memcg && !css_tryget_online(&memcg->css))
+ memcg = parent_mem_cgroup(memcg);
+ rcu_read_unlock();
+
+ if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ (1 << order));
+ percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
return 0;
- return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
+ }
+
+ ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ if (ret)
+ goto out;
+
+ lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+ mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
+
+ /* transfer try_charge() page references to kmem_cache */
+ percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+ css_put_many(&memcg->css, 1 << order);
+out:
+ css_put(&memcg->css);
+ return ret;
}
+/*
+ * Uncharge a slab page belonging to a non-root kmem_cache.
+ * Can be called for non-root kmem_caches only.
+ */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct kmem_cache *s)
{
- memcg_kmem_uncharge(page, order);
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = READ_ONCE(s->memcg_params.memcg);
+ if (likely(!mem_cgroup_is_root(memcg))) {
+ lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+ mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
+ memcg_kmem_uncharge_memcg(page, order, memcg);
+ } else {
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ -(1 << order));
+ }
+ rcu_read_unlock();
+
+ percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}
extern void slab_init_memcg_params(struct kmem_cache *);
-extern void memcg_link_cache(struct kmem_cache *s);
-extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
- void (*deact_fn)(struct kmem_cache *));
+extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
#else /* CONFIG_MEMCG_KMEM */
@@ -310,7 +361,7 @@ static inline bool is_root_cache(struct kmem_cache *s)
static inline bool slab_equal_or_root(struct kmem_cache *s,
struct kmem_cache *p)
{
- return true;
+ return s == p;
}
static inline const char *cache_name(struct kmem_cache *s)
@@ -318,15 +369,14 @@ static inline const char *cache_name(struct kmem_cache *s)
return s->name;
}
-static inline struct kmem_cache *
-cache_from_memcg_idx(struct kmem_cache *s, int idx)
+static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
- return NULL;
+ return s;
}
-static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
+static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
- return s;
+ return NULL;
}
static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
@@ -344,16 +394,52 @@ static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
-static inline void memcg_link_cache(struct kmem_cache *s)
+static inline void memcg_link_cache(struct kmem_cache *s,
+ struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+ struct page *page;
+
+ page = virt_to_head_page(obj);
+ if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
+ __func__))
+ return NULL;
+ return page->slab_cache;
+}
+
+static __always_inline int charge_slab_page(struct page *page,
+ gfp_t gfp, int order,
+ struct kmem_cache *s)
+{
+ if (is_root_cache(s)) {
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ 1 << order);
+ return 0;
+ }
+
+ return memcg_charge_slab(page, gfp, order, s);
+}
+
+static __always_inline void uncharge_slab_page(struct page *page, int order,
+ struct kmem_cache *s)
+{
+ if (is_root_cache(s)) {
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ -(1 << order));
+ return;
+ }
+
+ memcg_uncharge_slab(page, order, s);
+}
+
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
struct kmem_cache *cachep;
- struct page *page;
/*
* When kmemcg is not being used, both assignments should return the
@@ -363,18 +449,15 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* will also be a constant.
*/
if (!memcg_kmem_enabled() &&
+ !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
!unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
return s;
- page = virt_to_head_page(x);
- cachep = page->slab_cache;
- if (slab_equal_or_root(cachep, s))
- return cachep;
-
- pr_err("%s: Wrong slab cache. %s but object is from %s\n",
- __func__, s->name, cachep->name);
- WARN_ON_ONCE(1);
- return s;
+ cachep = virt_to_cache(x);
+ WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
+ "%s: Wrong slab cache. %s but object is from %s\n",
+ __func__, s->name, cachep->name);
+ return cachep;
}
static inline size_t slab_ksize(const struct kmem_cache *s)
@@ -524,4 +607,24 @@ static inline int cache_random_seq_create(struct kmem_cache *cachep,
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
+static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
+{
+ if (static_branch_unlikely(&init_on_alloc)) {
+ if (c->ctor)
+ return false;
+ if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
+ return flags & __GFP_ZERO;
+ return true;
+ }
+ return flags & __GFP_ZERO;
+}
+
+static inline bool slab_want_init_on_free(struct kmem_cache *c)
+{
+ if (static_branch_unlikely(&init_on_free))
+ return !(c->ctor ||
+ (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
+ return false;
+}
+
#endif /* MM_SLAB_H */
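The two predicates added at the bottom of mm/slab.h decide when SLAB/SLUB zero objects: with init_on_alloc enabled everything is wiped on allocation unless the cache has a constructor, caches marked SLAB_TYPESAFE_BY_RCU or SLAB_POISON keep the old __GFP_ZERO-only behaviour, and init_on_free skips those same caches entirely. The following self-contained sketch models that decision table with plain booleans; cache_desc, the two globals and the field names are illustrative stand-ins, not kernel structures.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the cache flags consulted by the kernel code. */
struct cache_desc {
	bool has_ctor;		/* cache has a constructor */
	bool typesafe_by_rcu;	/* SLAB_TYPESAFE_BY_RCU */
	bool poison;		/* SLAB_POISON debugging */
};

static bool init_on_alloc;	/* models the init_on_alloc static key */
static bool init_on_free;	/* models the init_on_free static key */

static bool want_init_on_alloc(bool gfp_zero, const struct cache_desc *c)
{
	if (init_on_alloc) {
		if (c->has_ctor)
			return false;
		if (c->typesafe_by_rcu || c->poison)
			return gfp_zero;
		return true;
	}
	return gfp_zero;
}

static bool want_init_on_free(const struct cache_desc *c)
{
	if (init_on_free)
		return !(c->has_ctor || c->typesafe_by_rcu || c->poison);
	return false;
}

int main(void)
{
	struct cache_desc plain = { 0 };
	struct cache_desc rcu = { .typesafe_by_rcu = true };

	init_on_alloc = init_on_free = true;
	printf("plain cache: init on alloc=%d, on free=%d\n",
	       want_init_on_alloc(false, &plain), want_init_on_free(&plain));
	printf("RCU cache:   init on alloc=%d, on free=%d\n",
	       want_init_on_alloc(false, &rcu), want_init_on_free(&rcu));
	return 0;
}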
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 58251ba63e4a..6c49dbb3769e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -17,6 +17,7 @@
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -130,6 +131,9 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
#ifdef CONFIG_MEMCG_KMEM
LIST_HEAD(slab_root_caches);
+static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
+
+static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
void slab_init_memcg_params(struct kmem_cache *s)
{
@@ -140,13 +144,18 @@ void slab_init_memcg_params(struct kmem_cache *s)
}
static int init_memcg_params(struct kmem_cache *s,
- struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+ struct kmem_cache *root_cache)
{
struct memcg_cache_array *arr;
if (root_cache) {
+ int ret = percpu_ref_init(&s->memcg_params.refcnt,
+ kmemcg_cache_shutdown,
+ 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
s->memcg_params.root_cache = root_cache;
- s->memcg_params.memcg = memcg;
INIT_LIST_HEAD(&s->memcg_params.children_node);
INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
return 0;
@@ -171,6 +180,8 @@ static void destroy_memcg_params(struct kmem_cache *s)
{
if (is_root_cache(s))
kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+ else
+ percpu_ref_exit(&s->memcg_params.refcnt);
}
static void free_memcg_params(struct rcu_head *rcu)
@@ -221,11 +232,13 @@ int memcg_update_all_caches(int num_memcgs)
return ret;
}
-void memcg_link_cache(struct kmem_cache *s)
+void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
{
if (is_root_cache(s)) {
list_add(&s->root_caches_node, &slab_root_caches);
} else {
+ css_get(&memcg->css);
+ s->memcg_params.memcg = memcg;
list_add(&s->memcg_params.children_node,
&s->memcg_params.root_cache->memcg_params.children);
list_add(&s->memcg_params.kmem_caches_node,
@@ -240,11 +253,13 @@ static void memcg_unlink_cache(struct kmem_cache *s)
} else {
list_del(&s->memcg_params.children_node);
list_del(&s->memcg_params.kmem_caches_node);
+ mem_cgroup_put(s->memcg_params.memcg);
+ WRITE_ONCE(s->memcg_params.memcg, NULL);
}
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
- struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+ struct kmem_cache *root_cache)
{
return 0;
}
@@ -384,7 +399,7 @@ static struct kmem_cache *create_cache(const char *name,
s->useroffset = useroffset;
s->usersize = usersize;
- err = init_memcg_params(s, memcg, root_cache);
+ err = init_memcg_params(s, root_cache);
if (err)
goto out_free_cache;
@@ -394,7 +409,7 @@ static struct kmem_cache *create_cache(const char *name,
s->refcount = 1;
list_add(&s->list, &slab_caches);
- memcg_link_cache(s);
+ memcg_link_cache(s, memcg);
out:
if (err)
return ERR_PTR(err);
@@ -640,7 +655,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
* The memory cgroup could have been offlined while the cache
* creation work was pending.
*/
- if (memcg->kmem_state != KMEM_ONLINE || root_cache->memcg_params.dying)
+ if (memcg->kmem_state != KMEM_ONLINE)
goto out_unlock;
idx = memcg_cache_id(memcg);
@@ -677,7 +692,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
}
/*
- * Since readers won't lock (see cache_from_memcg_idx()), we need a
+ * Since readers won't lock (see memcg_kmem_get_cache()), we need a
* barrier here to ensure nobody will see the kmem_cache partially
* initialized.
*/
@@ -691,74 +706,95 @@ out_unlock:
put_online_cpus();
}
-static void kmemcg_deactivate_workfn(struct work_struct *work)
+static void kmemcg_workfn(struct work_struct *work)
{
struct kmem_cache *s = container_of(work, struct kmem_cache,
- memcg_params.deact_work);
+ memcg_params.work);
get_online_cpus();
get_online_mems();
mutex_lock(&slab_mutex);
-
- s->memcg_params.deact_fn(s);
-
+ s->memcg_params.work_fn(s);
mutex_unlock(&slab_mutex);
put_online_mems();
put_online_cpus();
-
- /* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
- css_put(&s->memcg_params.memcg->css);
}
-static void kmemcg_deactivate_rcufn(struct rcu_head *head)
+static void kmemcg_rcufn(struct rcu_head *head)
{
struct kmem_cache *s = container_of(head, struct kmem_cache,
- memcg_params.deact_rcu_head);
+ memcg_params.rcu_head);
/*
- * We need to grab blocking locks. Bounce to ->deact_work. The
+ * We need to grab blocking locks. Bounce to ->work. The
* work item shares the space with the RCU head and can't be
 * initialized earlier.
*/
- INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
- queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
+ INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
+ queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
}
-/**
- * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
- * sched RCU grace period
- * @s: target kmem_cache
- * @deact_fn: deactivation function to call
- *
- * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
- * held after a sched RCU grace period. The slab is guaranteed to stay
- * alive until @deact_fn is finished. This is to be used from
- * __kmemcg_cache_deactivate().
- */
-void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
- void (*deact_fn)(struct kmem_cache *))
+static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
{
- if (WARN_ON_ONCE(is_root_cache(s)) ||
- WARN_ON_ONCE(s->memcg_params.deact_fn))
- return;
+ WARN_ON(shutdown_cache(s));
+}
+
+static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
+{
+ struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
+ memcg_params.refcnt);
+ unsigned long flags;
+ spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
if (s->memcg_params.root_cache->memcg_params.dying)
+ goto unlock;
+
+ s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
+ INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
+ queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
+
+unlock:
+ spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
+}
+
+static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
+{
+ __kmemcg_cache_deactivate_after_rcu(s);
+ percpu_ref_kill(&s->memcg_params.refcnt);
+}
+
+static void kmemcg_cache_deactivate(struct kmem_cache *s)
+{
+ if (WARN_ON_ONCE(is_root_cache(s)))
return;
- /* pin memcg so that @s doesn't get destroyed in the middle */
- css_get(&s->memcg_params.memcg->css);
+ __kmemcg_cache_deactivate(s);
+ s->flags |= SLAB_DEACTIVATED;
+
+ /*
+ * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
+ * flag and make sure that no new kmem_cache deactivation tasks
+ * are queued (see flush_memcg_workqueue() ).
+ */
+ spin_lock_irq(&memcg_kmem_wq_lock);
+ if (s->memcg_params.root_cache->memcg_params.dying)
+ goto unlock;
- s->memcg_params.deact_fn = deact_fn;
- call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+ s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
+ call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
+unlock:
+ spin_unlock_irq(&memcg_kmem_wq_lock);
}
-void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
+ struct mem_cgroup *parent)
{
int idx;
struct memcg_cache_array *arr;
struct kmem_cache *s, *c;
+ unsigned int nr_reparented;
idx = memcg_cache_id(memcg);
@@ -773,30 +809,20 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
if (!c)
continue;
- __kmemcg_cache_deactivate(c);
+ kmemcg_cache_deactivate(c);
arr->entries[idx] = NULL;
}
- mutex_unlock(&slab_mutex);
-
- put_online_mems();
- put_online_cpus();
-}
-
-void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
-{
- struct kmem_cache *s, *s2;
-
- get_online_cpus();
- get_online_mems();
-
- mutex_lock(&slab_mutex);
- list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
- memcg_params.kmem_caches_node) {
- /*
- * The cgroup is about to be freed and therefore has no charges
- * left. Hence, all its caches must be empty by now.
- */
- BUG_ON(shutdown_cache(s));
+ nr_reparented = 0;
+ list_for_each_entry(s, &memcg->kmem_caches,
+ memcg_params.kmem_caches_node) {
+ WRITE_ONCE(s->memcg_params.memcg, parent);
+ css_put(&memcg->css);
+ nr_reparented++;
+ }
+ if (nr_reparented) {
+ list_splice_init(&memcg->kmem_caches,
+ &parent->kmem_caches);
+ css_get_many(&parent->css, nr_reparented);
}
mutex_unlock(&slab_mutex);
@@ -861,16 +887,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
static void flush_memcg_workqueue(struct kmem_cache *s)
{
- mutex_lock(&slab_mutex);
+ spin_lock_irq(&memcg_kmem_wq_lock);
s->memcg_params.dying = true;
- mutex_unlock(&slab_mutex);
+ spin_unlock_irq(&memcg_kmem_wq_lock);
/*
- * SLUB deactivates the kmem_caches through call_rcu. Make
+ * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
* sure all registered rcu callbacks have been invoked.
*/
- if (IS_ENABLED(CONFIG_SLUB))
- rcu_barrier();
+ rcu_barrier();
/*
* SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
@@ -997,7 +1022,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
create_boot_cache(s, name, size, flags, useroffset, usersize);
list_add(&s->list, &slab_caches);
- memcg_link_cache(s);
+ memcg_link_cache(s, NULL);
s->refcount = 1;
return s;
}
@@ -1498,6 +1523,64 @@ static int __init slab_proc_init(void)
return 0;
}
module_init(slab_proc_init);
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
+/*
+ * Display information about kmem caches that have child memcg caches.
+ */
+static int memcg_slabinfo_show(struct seq_file *m, void *unused)
+{
+ struct kmem_cache *s, *c;
+ struct slabinfo sinfo;
+
+ mutex_lock(&slab_mutex);
+ seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
+ seq_puts(m, " <active_slabs> <num_slabs>\n");
+ list_for_each_entry(s, &slab_root_caches, root_caches_node) {
+ /*
+ * Skip kmem caches that don't have any memcg children.
+ */
+ if (list_empty(&s->memcg_params.children))
+ continue;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ get_slabinfo(s, &sinfo);
+ seq_printf(m, "%-17s root %6lu %6lu %6lu %6lu\n",
+ cache_name(s), sinfo.active_objs, sinfo.num_objs,
+ sinfo.active_slabs, sinfo.num_slabs);
+
+ for_each_memcg_cache(c, s) {
+ struct cgroup_subsys_state *css;
+ char *status = "";
+
+ css = &c->memcg_params.memcg->css;
+ if (!(css->flags & CSS_ONLINE))
+ status = ":dead";
+ else if (c->flags & SLAB_DEACTIVATED)
+ status = ":deact";
+
+ memset(&sinfo, 0, sizeof(sinfo));
+ get_slabinfo(c, &sinfo);
+ seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
+ cache_name(c), css->id, status,
+ sinfo.active_objs, sinfo.num_objs,
+ sinfo.active_slabs, sinfo.num_slabs);
+ }
+ }
+ mutex_unlock(&slab_mutex);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);
+
+static int __init memcg_slabinfo_init(void)
+{
+ debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
+ NULL, NULL, &memcg_slabinfo_fops);
+ return 0;
+}
+
+late_initcall(memcg_slabinfo_init);
+#endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */
#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
@@ -1597,6 +1680,52 @@ void kzfree(const void *p)
}
EXPORT_SYMBOL(kzfree);
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+ size_t size;
+
+ if (WARN_ON_ONCE(!objp))
+ return 0;
+ /*
+ * We need to check that the pointed to object is valid, and only then
+ * unpoison the shadow memory below. We use __kasan_check_read(), to
+ * generate a more useful report at the time ksize() is called (rather
+ * than later where behaviour is undefined due to potential
+ * use-after-free or double-free).
+ *
+ * If the pointed to memory is invalid we return 0, to avoid users of
+ * ksize() writing to and potentially corrupting the memory region.
+ *
+ * We want to perform the check before __ksize(), to avoid potentially
+ * crashing in __ksize() due to accessing invalid metadata.
+ */
+ if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1))
+ return 0;
+
+ size = __ksize(objp);
+ /*
+ * We assume that ksize callers could use whole allocated area,
+ * so we need to unpoison this area.
+ */
+ kasan_unpoison_shadow(objp, size);
+ return size;
+}
+EXPORT_SYMBOL(ksize);
+
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
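The restored ksize() wrapper keeps the documented contract — the caller may use the whole rounded-up allocation — while routing the size lookup through the new allocator-specific __ksize() and returning 0 if KASAN reports the pointer as invalid. The "use the slack space" pattern can be shown with a userspace analogue, assuming glibc's malloc_usable_size() as the counterpart of ksize(); this is only an illustration of the contract, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>		/* malloc_usable_size() on glibc */

int main(void)
{
	char *buf = malloc(100);

	if (!buf)
		return 1;
	/* The allocator may round the request up; the caller is allowed to
	 * use the full usable size, just like a ksize() caller in-kernel. */
	size_t usable = malloc_usable_size(buf);

	memset(buf, 0, usable);
	printf("requested 100 bytes, usable %zu bytes\n", usable);
	free(buf);
	return 0;
}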
diff --git a/mm/slob.c b/mm/slob.c
index 84aefd9b91ee..7f421d0ca9ab 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -527,7 +527,7 @@ void kfree(const void *block)
EXPORT_SYMBOL(kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
-size_t ksize(const void *block)
+size_t __ksize(const void *block)
{
struct page *sp;
int align;
@@ -545,7 +545,7 @@ size_t ksize(const void *block)
m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
diff --git a/mm/slub.c b/mm/slub.c
index cd04dbd2b5d0..e6c030e47364 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1279,6 +1279,10 @@ check_slabs:
if (*str == ',')
slub_debug_slabs = str + 1;
out:
+ if ((static_branch_unlikely(&init_on_alloc) ||
+ static_branch_unlikely(&init_on_free)) &&
+ (slub_debug & SLAB_POISON))
+ pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
return 1;
}
@@ -1313,9 +1317,7 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
char *end, *glob;
size_t cmplen;
- end = strchr(iter, ',');
- if (!end)
- end = iter + strlen(iter);
+ end = strchrnul(iter, ',');
glob = strnchr(iter, end - iter, '*');
if (glob)
@@ -1424,6 +1426,28 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void **head, void **tail)
{
+
+ void *object;
+ void *next = *head;
+ void *old_tail = *tail ? *tail : *head;
+ int rsize;
+
+ if (slab_want_init_on_free(s))
+ do {
+ object = next;
+ next = get_freepointer(s, object);
+ /*
+ * Clear the object and the metadata, but don't touch
+ * the redzone.
+ */
+ memset(object, 0, s->object_size);
+ rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
+ : 0;
+ memset((char *)object + s->inuse, 0,
+ s->size - s->inuse - rsize);
+ set_freepointer(s, object, next);
+ } while (object != old_tail);
+
/*
* Compiler cannot detect this function can be removed if slab_free_hook()
* evaluates to nothing. Thus, catch all relevant config debug options here.
@@ -1433,9 +1457,7 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
defined(CONFIG_DEBUG_OBJECTS_FREE) || \
defined(CONFIG_KASAN)
- void *object;
- void *next = *head;
- void *old_tail = *tail ? *tail : *head;
+ next = *head;
/* Head and tail of the reconstructed freelist */
*head = NULL;
@@ -1490,7 +1512,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = __alloc_pages_node(node, flags, order);
- if (page && memcg_charge_slab(page, flags, order, s)) {
+ if (page && charge_slab_page(page, flags, order, s)) {
__free_pages(page, order);
page = NULL;
}
@@ -1683,11 +1705,6 @@ out:
if (!page)
return NULL;
- mod_lruvec_page_state(page,
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- 1 << oo_order(oo));
-
inc_slabs_node(s, page_to_nid(page), page->objects);
return page;
@@ -1721,18 +1738,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
check_object(s, page, p, SLUB_RED_INACTIVE);
}
- mod_lruvec_page_state(page,
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
- -pages);
-
__ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
page->mapping = NULL;
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- memcg_uncharge_slab(page, order, s);
+ uncharge_slab_page(page, order, s);
__free_pages(page, order);
}
@@ -2741,8 +2753,14 @@ redo:
prefetch_freepointer(s, next_object);
stat(s, ALLOC_FASTPATH);
}
+ /*
+ * If the object has been wiped upon free, make sure it's fully
+ * initialized by zeroing out freelist pointer.
+ */
+ if (unlikely(slab_want_init_on_free(s)) && object)
+ memset(object + s->offset, 0, sizeof(void *));
- if (unlikely(gfpflags & __GFP_ZERO) && object)
+ if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size);
slab_post_alloc_hook(s, gfpflags, 1, &object);
@@ -3163,7 +3181,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
local_irq_enable();
/* Clear memory outside IRQ disabled fastpath loop */
- if (unlikely(flags & __GFP_ZERO)) {
+ if (unlikely(slab_want_init_on_alloc(flags, s))) {
int j;
for (j = 0; j < i; j++)
@@ -3652,10 +3670,6 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
free_kmem_cache_nodes(s);
error:
- if (flags & SLAB_PANIC)
- panic("Cannot create slab %s size=%u realsize=%u order=%u offset=%u flags=%lx\n",
- s->name, s->size, s->size,
- oo_order(s->oo), s->offset, (unsigned long)flags);
return -EINVAL;
}
@@ -3901,7 +3915,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
}
#endif /* CONFIG_HARDENED_USERCOPY */
-static size_t __ksize(const void *object)
+size_t __ksize(const void *object)
{
struct page *page;
@@ -3917,17 +3931,7 @@ static size_t __ksize(const void *object)
return slab_ksize(page->slab_cache);
}
-
-size_t ksize(const void *object)
-{
- size_t size = __ksize(object);
- /* We assume that ksize callers could use whole allocated area,
- * so we need to unpoison this area.
- */
- kasan_unpoison_shadow(object, size);
- return size;
-}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
void kfree(const void *x)
{
@@ -4024,7 +4028,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
}
#ifdef CONFIG_MEMCG
-static void kmemcg_cache_deact_after_rcu(struct kmem_cache *s)
+void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
{
/*
* Called with all the locks held after a sched RCU grace period.
@@ -4050,12 +4054,6 @@ void __kmemcg_cache_deactivate(struct kmem_cache *s)
*/
slub_set_cpu_partial(s, 0);
s->min_partial = 0;
-
- /*
- * s->cpu_partial is checked locklessly (see put_cpu_partial), so
- * we have to make sure the change is visible before shrinking.
- */
- slab_deactivate_memcg_cache_rcu_sched(s, kmemcg_cache_deact_after_rcu);
}
#endif /* CONFIG_MEMCG */
@@ -4215,7 +4213,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
}
slab_init_memcg_params(s);
list_add(&s->list, &slab_caches);
- memcg_link_cache(s);
+ memcg_link_cache(s, NULL);
return s;
}
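In SLUB the free-list pointer lives inside a free object (at s->offset), so slab_free_freelist_hook() wipes each object and its metadata on free but then has to write that pointer back, and slab_alloc() clears the single stale pointer word before returning so the caller still sees fully zeroed memory. A small userspace sketch of that two-step dance follows; FP_OFFSET and the fake object layout are illustrative assumptions, not the real SLUB layout.

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE  32
#define FP_OFFSET 0	/* assumed: where the free-list pointer sits in a free object */

struct fake_obj {
	unsigned char bytes[OBJ_SIZE];
};

/* "free with init_on_free": wipe the object, then link it into the freelist
 * by storing the next-free pointer inside it. */
static void wipe_and_link(struct fake_obj *obj, struct fake_obj *next)
{
	memset(obj->bytes, 0, OBJ_SIZE);
	memcpy(obj->bytes + FP_OFFSET, &next, sizeof(next));
}

/* "alloc": everything was zeroed at free time except the free-list word,
 * so only that word needs clearing before handing the object out. */
static void finish_alloc(struct fake_obj *obj)
{
	memset(obj->bytes + FP_OFFSET, 0, sizeof(void *));
}

int main(void)
{
	struct fake_obj a, b;

	memset(&a, 0xaa, sizeof(a));	/* pretend it held stale data */
	wipe_and_link(&a, &b);
	finish_alloc(&a);

	for (int i = 0; i < OBJ_SIZE; i++) {
		if (a.bytes[i]) {
			puts("object not fully zeroed");
			return 1;
		}
	}
	puts("object handed out fully zeroed");
	return 0;
}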
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 85245fdec8d9..8368621a0fc7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -73,23 +73,24 @@ unsigned long total_swapcache_pages(void)
unsigned int i, j, nr;
unsigned long ret = 0;
struct address_space *spaces;
+ struct swap_info_struct *si;
- rcu_read_lock();
for (i = 0; i < MAX_SWAPFILES; i++) {
- /*
- * The corresponding entries in nr_swapper_spaces and
- * swapper_spaces will be reused only after at least
- * one grace period. So it is impossible for them
- * belongs to different usage.
- */
- nr = nr_swapper_spaces[i];
- spaces = rcu_dereference(swapper_spaces[i]);
- if (!nr || !spaces)
+ swp_entry_t entry = swp_entry(i, 1);
+
+ /* Avoid get_swap_device() to warn for bad swap entry */
+ if (!swp_swap_info(entry))
+ continue;
+ /* Prevent swapoff to free swapper_spaces */
+ si = get_swap_device(entry);
+ if (!si)
continue;
+ nr = nr_swapper_spaces[i];
+ spaces = swapper_spaces[i];
for (j = 0; j < nr; j++)
ret += spaces[j].nrpages;
+ put_swap_device(si);
}
- rcu_read_unlock();
return ret;
}
@@ -310,8 +311,13 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
unsigned long addr)
{
struct page *page;
+ struct swap_info_struct *si;
+ si = get_swap_device(entry);
+ if (!si)
+ return NULL;
page = find_get_page(swap_address_space(entry), swp_offset(entry));
+ put_swap_device(si);
INC_CACHE_INFO(find_total);
if (page) {
@@ -354,8 +360,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated)
{
- struct page *found_page, *new_page = NULL;
- struct address_space *swapper_space = swap_address_space(entry);
+ struct page *found_page = NULL, *new_page = NULL;
+ struct swap_info_struct *si;
int err;
*new_page_allocated = false;
@@ -365,7 +371,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* called after lookup_swap_cache() failed, re-calling
* that would confuse statistics.
*/
- found_page = find_get_page(swapper_space, swp_offset(entry));
+ si = get_swap_device(entry);
+ if (!si)
+ break;
+ found_page = find_get_page(swap_address_space(entry),
+ swp_offset(entry));
+ put_swap_device(si);
if (found_page)
break;
@@ -601,20 +612,16 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
mapping_set_no_writeback_tags(space);
}
nr_swapper_spaces[type] = nr;
- rcu_assign_pointer(swapper_spaces[type], spaces);
+ swapper_spaces[type] = spaces;
return 0;
}
void exit_swap_address_space(unsigned int type)
{
- struct address_space *spaces;
-
- spaces = swapper_spaces[type];
+ kvfree(swapper_spaces[type]);
nr_swapper_spaces[type] = 0;
- rcu_assign_pointer(swapper_spaces[type], NULL);
- synchronize_rcu();
- kvfree(spaces);
+ swapper_spaces[type] = NULL;
}
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
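Every swap-cache lookup in this file now brackets find_get_page() with get_swap_device()/put_swap_device() so that swapper_spaces[] and the other per-device arrays cannot be freed by a concurrent swapoff; the RCU protection previously attached to swapper_spaces itself is dropped further down. Purely as a kernel-style sketch of that caller pattern (not a new helper, and not compiled here), the shape is:

/* Sketch only: stabilize the swap device across a swap-cache peek. */
static struct page *peek_swap_cache(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct page *page;

	si = get_swap_device(entry);
	if (!si)
		return NULL;	/* device gone or being swapped off */

	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);	/* ends the RCU-protected section */
	return page;
}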
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 596ac98051c5..0789a762ce2f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -152,6 +152,18 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
return ret;
}
+static inline struct swap_extent *first_se(struct swap_info_struct *sis)
+{
+ struct rb_node *rb = rb_first(&sis->swap_extent_root);
+ return rb_entry(rb, struct swap_extent, rb_node);
+}
+
+static inline struct swap_extent *next_se(struct swap_extent *se)
+{
+ struct rb_node *rb = rb_next(&se->rb_node);
+ return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
+}
+
/*
* swapon tell device that all the old swap contents can be discarded,
* to allow the swap device to optimize its wear-levelling.
@@ -164,7 +176,7 @@ static int discard_swap(struct swap_info_struct *si)
int err = 0;
/* Do not discard the swap header page! */
- se = &si->first_swap_extent;
+ se = first_se(si);
start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
if (nr_blocks) {
@@ -175,7 +187,7 @@ static int discard_swap(struct swap_info_struct *si)
cond_resched();
}
- list_for_each_entry(se, &si->first_swap_extent.list, list) {
+ for (se = next_se(se); se; se = next_se(se)) {
start_block = se->start_block << (PAGE_SHIFT - 9);
nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
@@ -189,6 +201,26 @@ static int discard_swap(struct swap_info_struct *si)
return err; /* That will often be -EOPNOTSUPP */
}
+static struct swap_extent *
+offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
+{
+ struct swap_extent *se;
+ struct rb_node *rb;
+
+ rb = sis->swap_extent_root.rb_node;
+ while (rb) {
+ se = rb_entry(rb, struct swap_extent, rb_node);
+ if (offset < se->start_page)
+ rb = rb->rb_left;
+ else if (offset >= se->start_page + se->nr_pages)
+ rb = rb->rb_right;
+ else
+ return se;
+ }
+ /* It *must* be present */
+ BUG();
+}
+
/*
* swap allocation tell device that a cluster of swap can now be discarded,
* to allow the swap device to optimize its wear-levelling.
@@ -196,32 +228,25 @@ static int discard_swap(struct swap_info_struct *si)
static void discard_swap_cluster(struct swap_info_struct *si,
pgoff_t start_page, pgoff_t nr_pages)
{
- struct swap_extent *se = si->curr_swap_extent;
- int found_extent = 0;
+ struct swap_extent *se = offset_to_swap_extent(si, start_page);
while (nr_pages) {
- if (se->start_page <= start_page &&
- start_page < se->start_page + se->nr_pages) {
- pgoff_t offset = start_page - se->start_page;
- sector_t start_block = se->start_block + offset;
- sector_t nr_blocks = se->nr_pages - offset;
-
- if (nr_blocks > nr_pages)
- nr_blocks = nr_pages;
- start_page += nr_blocks;
- nr_pages -= nr_blocks;
-
- if (!found_extent++)
- si->curr_swap_extent = se;
-
- start_block <<= PAGE_SHIFT - 9;
- nr_blocks <<= PAGE_SHIFT - 9;
- if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO, 0))
- break;
- }
+ pgoff_t offset = start_page - se->start_page;
+ sector_t start_block = se->start_block + offset;
+ sector_t nr_blocks = se->nr_pages - offset;
+
+ if (nr_blocks > nr_pages)
+ nr_blocks = nr_pages;
+ start_page += nr_blocks;
+ nr_pages -= nr_blocks;
+
+ start_block <<= PAGE_SHIFT - 9;
+ nr_blocks <<= PAGE_SHIFT - 9;
+ if (blkdev_issue_discard(si->bdev, start_block,
+ nr_blocks, GFP_NOIO, 0))
+ break;
- se = list_next_entry(se, list);
+ se = next_se(se);
}
}
@@ -1079,12 +1104,11 @@ fail:
static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
{
struct swap_info_struct *p;
- unsigned long offset, type;
+ unsigned long offset;
if (!entry.val)
goto out;
- type = swp_type(entry);
- p = swap_type_to_swap_info(type);
+ p = swp_swap_info(entry);
if (!p)
goto bad_nofile;
if (!(p->flags & SWP_USED))
@@ -1187,6 +1211,69 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
return usage;
}
+/*
+ * Check whether swap entry is valid in the swap device. If so,
+ * return pointer to swap_info_struct, and keep the swap entry valid
+ * via preventing the swap device from being swapoff, until
+ * put_swap_device() is called. Otherwise return NULL.
+ *
+ * The entirety of the RCU read critical section must come before the
+ * return from or after the call to synchronize_rcu() in
+ * enable_swap_info() or swapoff(). So if "si->flags & SWP_VALID" is
+ * true, the si->map, si->cluster_info, etc. must be valid in the
+ * critical section.
+ *
+ * Notice that swapoff or swapoff+swapon can still happen before the
+ * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
+ * in put_swap_device() if there isn't any other way to prevent
+ * swapoff, such as page lock, page table lock, etc. The caller must
+ * be prepared for that. For example, the following situation is
+ * possible.
+ *
+ * CPU1 CPU2
+ * do_swap_page()
+ * ... swapoff+swapon
+ * __read_swap_cache_async()
+ * swapcache_prepare()
+ * __swap_duplicate()
+ * // check swap_map
+ * // verify PTE not changed
+ *
+ * In __swap_duplicate(), the swap_map needs to be checked before
+ * being changed, partly because the specified swap entry may be for
+ * another swap device which has been swapped off. And in do_swap_page(), after
+ * the page is read from the swap device, the PTE is verified not
+ * changed with the page table locked to check whether the swap device
+ * has been swapoff or swapoff+swapon.
+ */
+struct swap_info_struct *get_swap_device(swp_entry_t entry)
+{
+ struct swap_info_struct *si;
+ unsigned long offset;
+
+ if (!entry.val)
+ goto out;
+ si = swp_swap_info(entry);
+ if (!si)
+ goto bad_nofile;
+
+ rcu_read_lock();
+ if (!(si->flags & SWP_VALID))
+ goto unlock_out;
+ offset = swp_offset(entry);
+ if (offset >= si->max)
+ goto unlock_out;
+
+ return si;
+bad_nofile:
+ pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
+out:
+ return NULL;
+unlock_out:
+ rcu_read_unlock();
+ return NULL;
+}
+
static unsigned char __swap_entry_free(struct swap_info_struct *p,
swp_entry_t entry, unsigned char usage)
{
@@ -1358,11 +1445,18 @@ int page_swapcount(struct page *page)
return count;
}
-int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
+int __swap_count(swp_entry_t entry)
{
+ struct swap_info_struct *si;
pgoff_t offset = swp_offset(entry);
+ int count = 0;
- return swap_count(si->swap_map[offset]);
+ si = get_swap_device(entry);
+ if (si) {
+ count = swap_count(si->swap_map[offset]);
+ put_swap_device(si);
+ }
+ return count;
}
static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
@@ -1387,9 +1481,11 @@ int __swp_swapcount(swp_entry_t entry)
int count = 0;
struct swap_info_struct *si;
- si = __swap_info_get(entry);
- if (si)
+ si = get_swap_device(entry);
+ if (si) {
count = swap_swapcount(si, entry);
+ put_swap_device(si);
+ }
return count;
}
@@ -1684,7 +1780,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
return type;
}
if (bdev == sis->bdev) {
- struct swap_extent *se = &sis->first_swap_extent;
+ struct swap_extent *se = first_se(sis);
if (se->start_block == offset) {
if (bdev_p)
@@ -2161,7 +2257,6 @@ static void drain_mmlist(void)
static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
struct swap_info_struct *sis;
- struct swap_extent *start_se;
struct swap_extent *se;
pgoff_t offset;
@@ -2169,18 +2264,8 @@ static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
*bdev = sis->bdev;
offset = swp_offset(entry);
- start_se = sis->curr_swap_extent;
- se = start_se;
-
- for ( ; ; ) {
- if (se->start_page <= offset &&
- offset < (se->start_page + se->nr_pages)) {
- return se->start_block + (offset - se->start_page);
- }
- se = list_next_entry(se, list);
- sis->curr_swap_extent = se;
- BUG_ON(se == start_se); /* It *must* be present */
- }
+ se = offset_to_swap_extent(sis, offset);
+ return se->start_block + (offset - se->start_page);
}
/*
@@ -2198,12 +2283,11 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev)
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
- while (!list_empty(&sis->first_swap_extent.list)) {
- struct swap_extent *se;
+ while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
+ struct rb_node *rb = sis->swap_extent_root.rb_node;
+ struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
- se = list_first_entry(&sis->first_swap_extent.list,
- struct swap_extent, list);
- list_del(&se->list);
+ rb_erase(rb, &sis->swap_extent_root);
kfree(se);
}
@@ -2219,7 +2303,7 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
/*
* Add a block range (and the corresponding page range) into this swapdev's
- * extent list. The extent list is kept sorted in page order.
+ * extent tree.
*
* This function rather assumes that it is called in ascending page order.
*/
@@ -2227,20 +2311,21 @@ int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
{
+ struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
struct swap_extent *se;
struct swap_extent *new_se;
- struct list_head *lh;
-
- if (start_page == 0) {
- se = &sis->first_swap_extent;
- sis->curr_swap_extent = se;
- se->start_page = 0;
- se->nr_pages = nr_pages;
- se->start_block = start_block;
- return 1;
- } else {
- lh = sis->first_swap_extent.list.prev; /* Highest extent */
- se = list_entry(lh, struct swap_extent, list);
+
+ /*
+ * place the new node as the rightmost node, since the
+ * function is called in ascending page order.
+ */
+ while (*link) {
+ parent = *link;
+ link = &parent->rb_right;
+ }
+
+ if (parent) {
+ se = rb_entry(parent, struct swap_extent, rb_node);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
/* Merge it */
@@ -2249,9 +2334,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
}
}
- /*
- * No merge. Insert a new extent, preserving ordering.
- */
+ /* No merge, insert a new extent. */
new_se = kmalloc(sizeof(*se), GFP_KERNEL);
if (new_se == NULL)
return -ENOMEM;
@@ -2259,7 +2342,8 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
- list_add_tail(&new_se->list, &sis->first_swap_extent.list);
+ rb_link_node(&new_se->rb_node, parent, link);
+ rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
return 1;
}
EXPORT_SYMBOL_GPL(add_swap_extent);
@@ -2335,9 +2419,9 @@ static int swap_node(struct swap_info_struct *p)
return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
}
-static void _enable_swap_info(struct swap_info_struct *p, int prio,
- unsigned char *swap_map,
- struct swap_cluster_info *cluster_info)
+static void setup_swap_info(struct swap_info_struct *p, int prio,
+ unsigned char *swap_map,
+ struct swap_cluster_info *cluster_info)
{
int i;
@@ -2362,7 +2446,11 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
}
p->swap_map = swap_map;
p->cluster_info = cluster_info;
- p->flags |= SWP_WRITEOK;
+}
+
+static void _enable_swap_info(struct swap_info_struct *p)
+{
+ p->flags |= SWP_WRITEOK | SWP_VALID;
atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
@@ -2389,7 +2477,17 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
frontswap_init(p->type, frontswap_map);
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, prio, swap_map, cluster_info);
+ setup_swap_info(p, prio, swap_map, cluster_info);
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ /*
+ * Guarantee swap_map, cluster_info, etc. fields are valid
+ * between get/put_swap_device() if SWP_VALID bit is set
+ */
+ synchronize_rcu();
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+ _enable_swap_info(p);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -2398,7 +2496,8 @@ static void reinsert_swap_info(struct swap_info_struct *p)
{
spin_lock(&swap_lock);
spin_lock(&p->lock);
- _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
+ setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
+ _enable_swap_info(p);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
}
@@ -2501,6 +2600,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
reenable_swap_slots_cache_unlock();
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+ p->flags &= ~SWP_VALID; /* mark swap device as invalid */
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ /*
+ * wait for swap operations protected by get/put_swap_device()
+ * to complete
+ */
+ synchronize_rcu();
+
flush_work(&p->discard_work);
destroy_swap_extents(p);
@@ -2749,7 +2859,7 @@ static struct swap_info_struct *alloc_swap_info(void)
* would be relying on p->type to remain valid.
*/
}
- INIT_LIST_HEAD(&p->first_swap_extent.list);
+ p->swap_extent_root = RB_ROOT;
plist_node_init(&p->list, 0);
for_each_node(i)
plist_node_init(&p->avail_lists[i], 0);
@@ -3265,17 +3375,11 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
unsigned char has_cache;
int err = -EINVAL;
- if (non_swap_entry(entry))
- goto out;
-
- p = swp_swap_info(entry);
+ p = get_swap_device(entry);
if (!p)
- goto bad_file;
-
- offset = swp_offset(entry);
- if (unlikely(offset >= p->max))
goto out;
+ offset = swp_offset(entry);
ci = lock_cluster_or_swap_info(p, offset);
count = p->swap_map[offset];
@@ -3321,11 +3425,9 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
unlock_out:
unlock_cluster_or_swap_info(p, ci);
out:
+ if (p)
+ put_swap_device(p);
return err;
-
-bad_file:
- pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
- goto out;
}
/*
@@ -3417,6 +3519,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
struct page *list_page;
pgoff_t offset;
unsigned char count;
+ int ret = 0;
/*
* When debugging, it's easier to use __GFP_ZERO here; but it's better
@@ -3424,15 +3527,15 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
*/
page = alloc_page(gfp_mask | __GFP_HIGHMEM);
- si = swap_info_get(entry);
+ si = get_swap_device(entry);
if (!si) {
/*
* An acceptable race has occurred since the failing
- * __swap_duplicate(): the swap entry has been freed,
- * perhaps even the whole swap_map cleared for swapoff.
+ * __swap_duplicate(): the swap device may be swapoff
*/
goto outer;
}
+ spin_lock(&si->lock);
offset = swp_offset(entry);
@@ -3450,9 +3553,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
}
if (!page) {
- unlock_cluster(ci);
- spin_unlock(&si->lock);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
/*
@@ -3504,10 +3606,11 @@ out_unlock_cont:
out:
unlock_cluster(ci);
spin_unlock(&si->lock);
+ put_swap_device(si);
outer:
if (page)
__free_page(page);
- return 0;
+ return ret;
}
/*
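The swapfile.c changes replace the sorted list of swap extents (plus the curr_swap_extent cursor) with an rbtree keyed by start_page, so offset_to_swap_extent() becomes an O(log n) descent instead of a list walk. The same page-offset-to-block mapping can be sketched in userspace over a sorted array, with the array standing in for the rbtree purely for illustration.

#include <stdio.h>

struct extent {
	unsigned long start_page;	/* first page offset covered */
	unsigned long nr_pages;		/* number of pages in the extent */
	unsigned long start_block;	/* backing device block of start_page */
};

/* Binary search mirroring the rbtree descent in offset_to_swap_extent(). */
static long offset_to_block(const struct extent *ext, int n, unsigned long offset)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		const struct extent *se = &ext[mid];

		if (offset < se->start_page)
			hi = mid - 1;
		else if (offset >= se->start_page + se->nr_pages)
			lo = mid + 1;
		else
			return se->start_block + (offset - se->start_page);
	}
	return -1;	/* the kernel version BUG()s here: the extent must exist */
}

int main(void)
{
	struct extent map[] = {
		{  0, 16, 100 },
		{ 16,  8, 300 },
		{ 24, 32,  50 },
	};

	printf("page 20 -> block %ld\n", offset_to_block(map, 3, 20)); /* 304 */
	return 0;
}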
diff --git a/mm/util.c b/mm/util.c
index 9834c4ab7d8e..68575a315dc5 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -300,53 +300,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
}
#endif
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- * If the architecture does not support this function, simply return with no
- * pages pinned.
- */
-int __weak __get_user_pages_fast(unsigned long start,
- int nr_pages, int write, struct page **pages)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(__get_user_pages_fast);
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @gup_flags: flags modifying pin behaviour
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm, with force=0 and vma=NULL. However
- * unlike get_user_pages, it must be called without mmap_sem held.
- *
- * get_user_pages_fast may take mmap_sem and page table locks, so no
- * assumptions can be made about lack of locking. get_user_pages_fast is to be
- * implemented in a way that is advantageous (vs get_user_pages()) when the
- * user memory area is already faulted in and present in ptes. However if the
- * pages have to be faulted in, it may turn out to be slightly slower so
- * callers need to carefully consider what to use. On many architectures,
- * get_user_pages_fast simply falls back to get_user_pages.
- *
- * Return: number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int __weak get_user_pages_fast(unsigned long start,
- int nr_pages, unsigned int gup_flags,
- struct page **pages)
-{
- return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
-}
-EXPORT_SYMBOL_GPL(get_user_pages_fast);
-
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 030a544e6602..4fa8d84599b0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -365,6 +365,13 @@ static LIST_HEAD(free_vmap_area_list);
*/
static struct rb_root free_vmap_area_root = RB_ROOT;
+/*
+ * Preload a CPU with one object for "no edge" split case. The
+ * aim is to get rid of allocations from the atomic context, thus
+ * to use more permissive allocation masks.
+ */
+static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
+
static __always_inline unsigned long
va_size(struct vmap_area *va)
{
@@ -399,6 +406,13 @@ static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);
+static atomic_long_t nr_vmalloc_pages;
+
+unsigned long vmalloc_nr_pages(void)
+{
+ return atomic_long_read(&nr_vmalloc_pages);
+}
+
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
@@ -527,20 +541,17 @@ link_va(struct vmap_area *va, struct rb_root *root,
static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
- /*
- * During merging a VA node can be empty, therefore
- * not linked with the tree nor list. Just check it.
- */
- if (!RB_EMPTY_NODE(&va->rb_node)) {
- if (root == &free_vmap_area_root)
- rb_erase_augmented(&va->rb_node,
- root, &free_vmap_area_rb_augment_cb);
- else
- rb_erase(&va->rb_node, root);
+ if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
+ return;
- list_del(&va->list);
- RB_CLEAR_NODE(&va->rb_node);
- }
+ if (root == &free_vmap_area_root)
+ rb_erase_augmented(&va->rb_node,
+ root, &free_vmap_area_rb_augment_cb);
+ else
+ rb_erase(&va->rb_node, root);
+
+ list_del(&va->list);
+ RB_CLEAR_NODE(&va->rb_node);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK
@@ -712,9 +723,6 @@ merge_or_add_vmap_area(struct vmap_area *va,
/* Check and update the tree if needed. */
augment_tree_propagate_from(sibling);
- /* Remove this VA, it has been merged. */
- unlink_va(va, root);
-
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
@@ -739,12 +747,11 @@ merge_or_add_vmap_area(struct vmap_area *va,
/* Check and update the tree if needed. */
augment_tree_propagate_from(sibling);
- /* Remove this VA, it has been merged. */
- unlink_va(va, root);
+ if (merged)
+ unlink_va(va, root);
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
-
return;
}
}
@@ -951,9 +958,24 @@ adjust_va_to_fit_type(struct vmap_area *va,
* L V NVA V R
* |---|-------|---|
*/
- lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
- if (unlikely(!lva))
- return -1;
+ lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
+ if (unlikely(!lva)) {
+ /*
+ * For percpu allocator we do not do any pre-allocation
+ * and leave it as it is. The reason is it most likely
+ * never ends up with NE_FIT_TYPE splitting. In case of
+ * percpu allocations offsets and sizes are aligned to
+ * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
+ * are its main fitting cases.
+ *
+ * There are a few exceptions though, as an example it is
+ * a first allocation (early boot up) when we have "one"
+ * big free space that has to be split.
+ */
+ lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
+ if (!lva)
+ return -1;
+ }
/*
* Build the remainder.
@@ -986,7 +1008,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
*/
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
- unsigned long vstart, unsigned long vend, int node)
+ unsigned long vstart, unsigned long vend)
{
unsigned long nva_start_addr;
struct vmap_area *va;
@@ -1032,7 +1054,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long vstart, unsigned long vend,
int node, gfp_t gfp_mask)
{
- struct vmap_area *va;
+ struct vmap_area *va, *pva;
unsigned long addr;
int purged = 0;
@@ -1057,13 +1079,38 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
retry:
+ /*
+ * Preload this CPU with one extra vmap_area object to ensure
+ * that it is available when the fit type of the free area is
+ * NE_FIT_TYPE.
+ *
+ * The preload is done in non-atomic context, which allows us to
+ * use more permissive allocation masks and therefore be more
+ * stable under low-memory conditions and high memory pressure.
+ *
+ * Even if the preload fails we do not really care. Just proceed
+ * as is; the "overflow" path will refill the cache we allocate from.
+ */
+ preempt_disable();
+ if (!__this_cpu_read(ne_fit_preload_node)) {
+ preempt_enable();
+ pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
+ preempt_disable();
+
+ if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
+ if (pva)
+ kmem_cache_free(vmap_area_cachep, pva);
+ }
+ }
+
spin_lock(&vmap_area_lock);
+ preempt_enable();
/*
* If an allocation fails, the "vend" address is
* returned. Therefore trigger the overflow path.
*/
- addr = __alloc_vmap_area(size, align, vstart, vend, node);
+ addr = __alloc_vmap_area(size, align, vstart, vend);
if (unlikely(addr == vend))
goto overflow;
@@ -1119,8 +1166,6 @@ EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
static void __free_vmap_area(struct vmap_area *va)
{
- BUG_ON(RB_EMPTY_NODE(&va->rb_node));
-
/*
* Remove from the busy tree/list.
*/
@@ -2199,6 +2244,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
BUG_ON(!page);
__free_pages(page, 0);
}
+ atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
kvfree(area->pages);
}
@@ -2376,12 +2422,14 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
area->nr_pages = i;
+ atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
goto fail;
}
area->pages[i] = page;
if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
cond_resched();
}
+ atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
if (map_vm_area(area, prot, pages))
goto fail;
@@ -2774,7 +2822,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
* Note: In usual ops, vread() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
* This is for routines which have to access vmalloc area without
- * any informaion, as /dev/kmem.
+ * any information, as /dev/kmem.
*
* Return: number of bytes for which addr and buf should be increased
* (same number as @count) or %0 if [addr...addr+count) doesn't
@@ -2853,7 +2901,7 @@ finished:
* Note: In usual ops, vwrite() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
* This is for routines which have to access vmalloc area without
- * any informaion, as /dev/kmem.
+ * any information, as /dev/kmem.
*
* Return: number of bytes for which addr and buf should be
* increased (same number as @count) or %0 if [addr...addr+count)
@@ -2996,7 +3044,7 @@ void __weak vmalloc_sync_all(void)
}
-static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
+static int f(pte_t *pte, unsigned long addr, void *data)
{
pte_t ***p = data;
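The vmalloc.c changes combine two additions: an nr_vmalloc_pages counter exported through vmalloc_nr_pages(), and a per-CPU preload (ne_fit_preload_node) so that the NE_FIT_TYPE split in adjust_va_to_fit_type() normally finds a spare vmap_area instead of allocating with GFP_NOWAIT under vmap_area_lock. A condensed sketch of the hand-off, reusing the names from the patch (not the literal upstream code; pva and lva are struct vmap_area pointers):

	/* Producer: preemptible context, so GFP_KERNEL is allowed. */
	preempt_disable();
	if (!__this_cpu_read(ne_fit_preload_node)) {
		preempt_enable();
		pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
		preempt_disable();

		/* Someone else preloaded this CPU meanwhile: drop the extra. */
		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva) && pva)
			kmem_cache_free(vmap_area_cachep, pva);
	}
	spin_lock(&vmap_area_lock);	/* the preload is consumed under this lock */
	preempt_enable();

	/* Consumer, in adjust_va_to_fit_type(): take the spare if present. */
	lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
	if (unlikely(!lva))
		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);

If the preload fails or was never done (the percpu-allocator case), the GFP_NOWAIT fallback keeps the old behaviour and the overflow path refills the cache.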
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 910e02c793ff..f8e3dcd527b8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1118,6 +1118,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
int may_enter_fs;
enum page_references references = PAGEREF_RECLAIM_CLEAN;
bool dirty, writeback;
+ unsigned int nr_pages;
cond_resched();
@@ -1129,7 +1130,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
VM_BUG_ON_PAGE(PageActive(page), page);
- sc->nr_scanned++;
+ nr_pages = 1 << compound_order(page);
+
+ /* Account the number of base pages even for a THP */
+ sc->nr_scanned += nr_pages;
if (unlikely(!page_evictable(page)))
goto activate_locked;
@@ -1137,11 +1141,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (!sc->may_unmap && page_mapped(page))
goto keep_locked;
- /* Double the slab pressure for mapped and swapcache pages */
- if ((page_mapped(page) || PageSwapCache(page)) &&
- !(PageAnon(page) && !PageSwapBacked(page)))
- sc->nr_scanned++;
-
may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
@@ -1255,7 +1254,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
case PAGEREF_ACTIVATE:
goto activate_locked;
case PAGEREF_KEEP:
- stat->nr_ref_keep++;
+ stat->nr_ref_keep += nr_pages;
goto keep_locked;
case PAGEREF_RECLAIM:
case PAGEREF_RECLAIM_CLEAN:
@@ -1287,7 +1286,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
if (!add_to_swap(page)) {
if (!PageTransHuge(page))
- goto activate_locked;
+ goto activate_locked_split;
/* Fallback to swap normal pages */
if (split_huge_page_to_list(page,
page_list))
@@ -1296,7 +1295,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
count_vm_event(THP_SWPOUT_FALLBACK);
#endif
if (!add_to_swap(page))
- goto activate_locked;
+ goto activate_locked_split;
}
may_enter_fs = 1;
@@ -1311,6 +1310,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
/*
+ * The THP may have been split above; if so, subtract the tail
+ * pages and update nr_pages to avoid accounting them twice.
+ *
+ * Tail pages that were successfully added to the swap cache
+ * also reach here.
+ */
+ if ((nr_pages > 1) && !PageTransHuge(page)) {
+ sc->nr_scanned -= (nr_pages - 1);
+ nr_pages = 1;
+ }
+
+ /*
* The page is mapped into the page tables of one or more
* processes. Try to unmap it here.
*/
@@ -1320,7 +1331,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (unlikely(PageTransHuge(page)))
flags |= TTU_SPLIT_HUGE_PMD;
if (!try_to_unmap(page, flags)) {
- stat->nr_unmap_fail++;
+ stat->nr_unmap_fail += nr_pages;
goto activate_locked;
}
}
@@ -1447,7 +1458,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
unlock_page(page);
free_it:
- nr_reclaimed++;
+ /*
+ * A THP may be swapped out as a whole, so account
+ * all of its base pages.
+ */
+ nr_reclaimed += nr_pages;
/*
* Is there need to periodically free_page_list? It would
@@ -1460,6 +1475,15 @@ free_it:
list_add(&page->lru, &free_pages);
continue;
+activate_locked_split:
+ /*
+ * Tail pages that failed to be added to the swap cache
+ * reach here. Fix up nr_scanned and nr_pages.
+ */
+ if (nr_pages > 1) {
+ sc->nr_scanned -= (nr_pages - 1);
+ nr_pages = 1;
+ }
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||
@@ -1469,8 +1493,7 @@ activate_locked:
if (!PageMlocked(page)) {
int type = page_is_file_cache(page);
SetPageActive(page);
- pgactivate++;
- stat->nr_activate[type] += hpage_nr_pages(page);
+ stat->nr_activate[type] += nr_pages;
count_memcg_page_event(page, PGACTIVATE);
}
keep_locked:
@@ -1480,6 +1503,8 @@ keep:
VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
}
+ pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
+
mem_cgroup_uncharge_list(&free_pages);
try_to_unmap_flush();
free_unref_page_list(&free_pages);
@@ -1651,10 +1676,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
LIST_HEAD(pages_skipped);
isolate_mode_t mode = (sc->may_unmap ? 0 : ISOLATE_UNMAPPED);
+ total_scan = 0;
scan = 0;
- for (total_scan = 0;
- scan < nr_to_scan && nr_taken < nr_to_scan && !list_empty(src);
- total_scan++) {
+ while (scan < nr_to_scan && !list_empty(src)) {
struct page *page;
page = lru_to_page(src);
@@ -1662,9 +1686,12 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
VM_BUG_ON_PAGE(!PageLRU(page), page);
+ nr_pages = 1 << compound_order(page);
+ total_scan += nr_pages;
+
if (page_zonenum(page) > sc->reclaim_idx) {
list_move(&page->lru, &pages_skipped);
- nr_skipped[page_zonenum(page)]++;
+ nr_skipped[page_zonenum(page)] += nr_pages;
continue;
}
@@ -1673,11 +1700,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* return with no isolated pages if the LRU mostly contains
* ineligible pages. This causes the VM to not reclaim any
* pages, triggering a premature OOM.
+ *
+ * Account all tail pages of a THP. This would not cause
+ * premature OOM since __isolate_lru_page() returns -EBUSY
+ * only when the page is being freed somewhere else.
*/
- scan++;
+ scan += nr_pages;
switch (__isolate_lru_page(page, mode)) {
case 0:
- nr_pages = hpage_nr_pages(page);
nr_taken += nr_pages;
nr_zone_taken[page_zonenum(page)] += nr_pages;
list_move(&page->lru, dst);
@@ -2125,7 +2155,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
* 10TB 320 32GB
*/
static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
- struct scan_control *sc, bool actual_reclaim)
+ struct scan_control *sc, bool trace)
{
enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -2151,7 +2181,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
* rid of the stale workingset quickly.
*/
refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
- if (file && actual_reclaim && lruvec->refaults != refaults) {
+ if (file && lruvec->refaults != refaults) {
inactive_ratio = 0;
} else {
gb = (inactive + active) >> (30 - PAGE_SHIFT);
@@ -2161,7 +2191,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
inactive_ratio = 1;
}
- if (actual_reclaim)
+ if (trace)
trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
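The vmscan.c hunks change the reclaim statistics from counting LRU entries to counting base pages, so a THP contributes all of its base pages to nr_scanned, nr_reclaimed, nr_unmap_fail and friends. A minimal sketch of the accounting rule, using a hypothetical helper and the mm/vmscan.c names (scan_control, PageTransHuge) rather than the literal function:

	static unsigned int thp_aware_scan_accounting(struct page *page,
						      struct scan_control *sc)
	{
		/* compound_order() is 0 for a base page, so this yields 1. */
		unsigned int nr_pages = 1 << compound_order(page);

		/* Account all base pages up front, even for a THP. */
		sc->nr_scanned += nr_pages;

		/*
		 * If the THP was split while on the list, its tail pages are
		 * processed (and accounted) on their own, so give back the
		 * excess and continue as a single page.
		 */
		if (nr_pages > 1 && !PageTransHuge(page)) {
			sc->nr_scanned -= nr_pages - 1;
			nr_pages = 1;
		}

		return nr_pages;	/* reused for nr_reclaimed, nr_activate, ... */
	}

isolate_lru_pages() applies the same idea: scan and total_scan advance by nr_pages, so nr_to_scan is compared against base pages instead of list entries.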
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 985732c8b025..dfcd69d08c1e 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -924,7 +924,16 @@ retry:
set_bit(PAGE_HEADLESS, &page->private);
goto headless;
}
- __SetPageMovable(page, pool->inode->i_mapping);
+ if (can_sleep) {
+ lock_page(page);
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
+ } else {
+ if (trylock_page(page)) {
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
+ }
+ }
z3fold_page_lock(zhdr);
found:
@@ -1331,6 +1340,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
zhdr = page_address(page);
pool = zhdr_to_pool(zhdr);
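The z3fold hunk takes the page lock around __SetPageMovable(), which is expected to be called on a locked page; when sleeping is not allowed, a failed trylock simply leaves the page non-movable. A sketch of that rule, using a hypothetical helper name:

	static void z3fold_mark_movable(struct page *page,
					struct address_space *mapping,
					bool can_sleep)
	{
		if (can_sleep) {
			lock_page(page);
			__SetPageMovable(page, mapping);
			unlock_page(page);
		} else if (trylock_page(page)) {
			__SetPageMovable(page, mapping);
			unlock_page(page);
		}
		/* If the trylock fails, compaction just skips this page. */
	}

The added VM_BUG_ON_PAGE(!PageLocked(newpage), newpage) in z3fold_page_migrate() documents the matching expectation on the migration target.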
diff --git a/net/core/sock.c b/net/core/sock.c
index 3e073ca6138f..d57b0cc995a0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1597,7 +1597,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
if (!sk)
return sk;
- if (priority & __GFP_ZERO)
+ if (want_init_on_alloc(priority))
sk_prot_clear_nulls(sk, prot->obj_size);
} else
sk = kmalloc(prot->obj_size, priority);
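The sock change treats the global init_on_alloc switch like an explicit __GFP_ZERO request when deciding whether to set up the nulls markers, since in both cases the object comes back zeroed. The helper is assumed to look roughly like this (paraphrased from the init_on_alloc series, not verified against the final tree):

	static inline bool want_init_on_alloc(gfp_t flags)
	{
		/* Global switch: CONFIG_INIT_ON_ALLOC_DEFAULT_ON or init_on_alloc=1. */
		if (static_branch_unlikely(&init_on_alloc))
			return true;
		/* Otherwise honour the caller's explicit request. */
		return flags & __GFP_ZERO;
	}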
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index a7a36209a193..13e5fbafdf2f 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -28,7 +28,7 @@ parse_symbol() {
local objfile=${modcache[$module]}
else
[[ $modpath == "" ]] && return
- local objfile=$(find "$modpath" -name $module.ko -print -quit)
+ local objfile=$(find "$modpath" -name "${module//_/[-_]}.ko*" -print -quit)
[[ $objfile == "" ]] && return
modcache[$module]=$objfile
fi
@@ -85,7 +85,7 @@ parse_symbol() {
fi
# Strip out the base of the path
- code=${code//^$basepath/""}
+ code=${code#$basepath/}
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 86b87332b9e5..de75b9feaaed 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -41,6 +41,7 @@ accquired||acquired
accross||across
acessable||accessible
acess||access
+acessing||accessing
achitecture||architecture
acient||ancient
acitions||actions
@@ -54,6 +55,7 @@ activete||activate
actived||activated
actualy||actually
acumulating||accumulating
+acumulative||accumulative
acumulator||accumulator
adapater||adapter
addional||additional
@@ -103,6 +105,7 @@ alogrithm||algorithm
alot||a lot
alow||allow
alows||allows
+alredy||already
altough||although
alue||value
ambigious||ambiguous
@@ -223,6 +226,7 @@ boardcast||broadcast
borad||board
boundry||boundary
brievely||briefly
+brigde||bridge
broadcase||broadcast
broadcat||broadcast
bufufer||buffer
@@ -239,6 +243,7 @@ calulate||calculate
cancelation||cancellation
cancle||cancel
capabilites||capabilities
+capabilties||capabilities
capabilty||capability
capabitilies||capabilities
capablity||capability
@@ -325,6 +330,7 @@ conector||connector
connecetd||connected
configuartion||configuration
configuation||configuration
+configued||configured
configuratoin||configuration
configuraton||configuration
configuretion||configuration
@@ -407,6 +413,7 @@ depreacte||deprecate
desactivate||deactivate
desciptor||descriptor
desciptors||descriptors
+descripto||descriptor
descripton||description
descrition||description
descritptor||descriptor
@@ -432,6 +439,7 @@ deveolpment||development
devided||divided
deviece||device
diable||disable
+dicline||decline
dictionnary||dictionary
didnt||didn't
diferent||different
@@ -461,6 +469,7 @@ disharge||discharge
disnabled||disabled
dispertion||dispersion
dissapears||disappears
+dissconect||disconnect
distiction||distinction
divisable||divisible
divsiors||divisors
@@ -469,11 +478,14 @@ documantation||documentation
documentaion||documentation
documment||document
doesnt||doesn't
+donwload||download
+donwloading||downloading
dorp||drop
dosen||doesn
downlad||download
downlads||downloads
droped||dropped
+droput||dropout
druing||during
dynmaic||dynamic
eanable||enable
@@ -482,6 +494,7 @@ ecspecially||especially
edditable||editable
editting||editing
efective||effective
+effectivness||effectiveness
efficently||efficiently
ehther||ether
eigth||eight
@@ -543,6 +556,7 @@ extensability||extensibility
extention||extension
extenstion||extension
extracter||extractor
+faied||failed
faield||failed
falied||failed
faild||failed
@@ -567,6 +581,7 @@ fetaures||features
fileystem||filesystem
fimware||firmware
firmare||firmware
+firmaware||firmware
firware||firmware
finanize||finalize
findn||find
@@ -601,6 +616,8 @@ funtions||functions
furthur||further
futhermore||furthermore
futrue||future
+gatable||gateable
+gateing||gating
gauage||gauge
gaurenteed||guaranteed
generiously||generously
@@ -641,9 +658,11 @@ iomaped||iomapped
imblance||imbalance
immeadiately||immediately
immedaite||immediate
+immedate||immediate
immediatelly||immediately
immediatly||immediately
immidiate||immediate
+immutible||immutable
impelentation||implementation
impementated||implemented
implemantation||implementation
@@ -661,10 +680,12 @@ incative||inactive
incomming||incoming
incompatabilities||incompatibilities
incompatable||incompatible
+incompatble||incompatible
inconsistant||inconsistent
increas||increase
incremeted||incremented
incrment||increment
+inculde||include
indendation||indentation
indended||intended
independant||independent
@@ -778,6 +799,7 @@ libary||library
librairies||libraries
libraris||libraries
licenceing||licencing
+logaritmic||logarithmic
loggging||logging
loggin||login
logile||logfile
@@ -832,6 +854,7 @@ mispelled||misspelled
mispelt||misspelt
mising||missing
mismactch||mismatch
+missign||missing
missmanaged||mismanaged
missmatch||mismatch
miximum||maximum
@@ -848,6 +871,7 @@ mopdule||module
mroe||more
mulitplied||multiplied
multidimensionnal||multidimensional
+multipe||multiple
multple||multiple
mumber||number
muticast||multicast
@@ -870,7 +894,9 @@ nescessary||necessary
nessessary||necessary
noticable||noticeable
notications||notifications
+notifcations||notifications
notifed||notified
+notity||notify
numebr||number
numner||number
obtaion||obtain
@@ -887,6 +913,7 @@ occuring||occurring
offser||offset
offet||offset
offloded||offloaded
+offseting||offsetting
omited||omitted
omiting||omitting
omitt||omit
@@ -1009,6 +1036,8 @@ programers||programmers
programm||program
programms||programs
progresss||progress
+prohibitted||prohibited
+prohibitting||prohibiting
promiscous||promiscuous
promps||prompts
pronnounced||pronounced
@@ -1023,6 +1052,7 @@ prosess||process
protable||portable
protcol||protocol
protecion||protection
+protedcted||protected
protocoll||protocol
promixity||proximity
psudo||pseudo
@@ -1037,6 +1067,7 @@ reasearcher||researcher
reasearchers||researchers
reasearch||research
recepient||recipient
+recevied||received
receving||receiving
recieved||received
recieve||receive
@@ -1110,6 +1141,7 @@ retreived||retrieved
retreive||retrieve
retreiving||retrieving
retrive||retrieve
+retrived||retrieved
retuned||returned
reudce||reduce
reuest||request
@@ -1145,7 +1177,6 @@ senarios||scenarios
sentivite||sensitive
separatly||separately
sepcify||specify
-sepc||spec
seperated||separated
seperately||separately
seperate||separate
@@ -1177,6 +1208,7 @@ singaled||signaled
singal||signal
singed||signed
sleeped||slept
+sliped||slipped
softwares||software
speach||speech
specfic||specific
@@ -1283,6 +1315,7 @@ threds||threads
threshhold||threshold
thresold||threshold
throught||through
+trackling||tracking
troughput||throughput
thses||these
tiggers||triggers
@@ -1409,5 +1442,6 @@ wnat||want
workarould||workaround
writeing||writing
writting||writing
+wtih||with
zombe||zombie
zomebie||zombie
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index c6cb2d9b2905..a1ffe2eb4d5f 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -160,6 +160,35 @@ config STACKLEAK_RUNTIME_DISABLE
runtime to control kernel stack erasing for kernels built with
CONFIG_GCC_PLUGIN_STACKLEAK.
+config INIT_ON_ALLOC_DEFAULT_ON
+ bool "Enable heap memory zeroing on allocation by default"
+ help
+ This has the effect of setting "init_on_alloc=1" on the kernel
+ command line. This can be disabled with "init_on_alloc=0".
+ When "init_on_alloc" is enabled, all page allocator and slab
+ allocator memory will be zeroed when allocated, eliminating
+ many kinds of "uninitialized heap memory" flaws, especially
+ heap content exposures. The performance impact varies by
+ workload, but most cases see <1% impact. Some synthetic
+ workloads have measured as high as 7%.
+
+config INIT_ON_FREE_DEFAULT_ON
+ bool "Enable heap memory zeroing on free by default"
+ help
+ This has the effect of setting "init_on_free=1" on the kernel
+ command line. This can be disabled with "init_on_free=0".
+ Similar to "init_on_alloc", when "init_on_free" is enabled,
+ all page allocator and slab allocator memory will be zeroed
+ when freed, eliminating many kinds of "uninitialized heap memory"
+ flaws, especially heap content exposures. The primary difference
+ with "init_on_free" is that data lifetime in memory is reduced,
+ as anything freed is wiped immediately, making live forensics or
+ cold boot memory attacks unable to recover freed memory contents.
+ The performance impact varies by workload, but is more expensive
+ than "init_on_alloc" due to the negative cache effects of
+ touching "cold" memory areas. Most cases see 3-5% impact. Some
+ synthetic workloads have measured as high as 8%.
+
endmenu
endmenu
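Both options only flip the default of a boot-time switch; the runtime checks are assumed to reduce to static-key branches, roughly as below (init_on_alloc/init_on_free are static keys declared in the mm code; the definition is paraphrased):

	static inline bool want_init_on_free(void)
	{
		/*
		 * Set by CONFIG_INIT_ON_FREE_DEFAULT_ON or init_on_free= on the
		 * command line; the page and slab free paths wipe the memory
		 * when this returns true.
		 */
		return static_branch_unlikely(&init_on_free);
	}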
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 73818f1b2ef8..68092d15e12b 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -79,6 +79,7 @@ int sort_size;
int sort_active;
int set_debug;
int show_ops;
+int sort_partial;
int show_activity;
int output_lines = -1;
int sort_loss;
@@ -110,7 +111,7 @@ static void fatal(const char *x, ...)
static void usage(void)
{
printf("slabinfo 4/15/2011. (c) 2007 sgi/(c) 2011 Linux Foundation.\n\n"
- "slabinfo [-aADefhilnosrStTvz1LXBU] [N=K] [-dafzput] [slab-regexp]\n"
+ "slabinfo [-aABDefhilLnoPrsStTUvXz1] [N=K] [-dafzput] [slab-regexp]\n"
"-a|--aliases Show aliases\n"
"-A|--activity Most active slabs first\n"
"-B|--Bytes Show size in bytes\n"
@@ -124,6 +125,7 @@ static void usage(void)
"-n|--numa Show NUMA information\n"
"-N|--lines=K Show the first K slabs\n"
"-o|--ops Show kmem_cache_ops\n"
+ "-P|--partial Sort by number of partial slabs\n"
"-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
"-S|--Size Sort by size\n"
@@ -131,9 +133,9 @@ static void usage(void)
"-T|--Totals Show summary information\n"
"-U|--Unreclaim Show unreclaimable slabs only\n"
"-v|--validate Validate slabs\n"
+ "-X|--Xtotals Show extended summary information\n"
"-z|--zero Include empty slabs\n"
"-1|--1ref Single reference\n"
- "-X|--Xtotals Show extended summary information\n"
"\n"
"-d | --debug Switch off all debug options\n"
@@ -146,6 +148,8 @@ static void usage(void)
" p | P Poisoning\n"
" u | U Tracking\n"
" t | T Tracing\n"
+
+ "\nSorting options (--Loss, --Size, --Partial) are mutually exclusive\n"
);
}
@@ -1047,6 +1051,8 @@ static void sort_slabs(void)
result = slab_activity(s1) < slab_activity(s2);
else if (sort_loss)
result = slab_waste(s1) < slab_waste(s2);
+ else if (sort_partial)
+ result = s1->partial < s2->partial;
else
result = strcasecmp(s1->name, s2->name);
@@ -1307,33 +1313,46 @@ static void output_slabs(void)
}
}
+static void _xtotals(char *heading, char *underline,
+ int loss, int size, int partial)
+{
+ printf("%s%s", heading, underline);
+ line = 0;
+ sort_loss = loss;
+ sort_size = size;
+ sort_partial = partial;
+ sort_slabs();
+ output_slabs();
+}
+
static void xtotals(void)
{
+ char *heading, *underline;
+
totals();
link_slabs();
rename_slabs();
- printf("\nSlabs sorted by size\n");
- printf("--------------------\n");
- sort_loss = 0;
- sort_size = 1;
- sort_slabs();
- output_slabs();
+ heading = "\nSlabs sorted by size\n";
+ underline = "--------------------\n";
+ _xtotals(heading, underline, 0, 1, 0);
+
+ heading = "\nSlabs sorted by loss\n";
+ underline = "--------------------\n";
+ _xtotals(heading, underline, 1, 0, 0);
+
+ heading = "\nSlabs sorted by number of partial slabs\n";
+ underline = "---------------------------------------\n";
+ _xtotals(heading, underline, 0, 0, 1);
- printf("\nSlabs sorted by loss\n");
- printf("--------------------\n");
- line = 0;
- sort_loss = 1;
- sort_size = 0;
- sort_slabs();
- output_slabs();
printf("\n");
}
struct option opts[] = {
{ "aliases", no_argument, NULL, 'a' },
{ "activity", no_argument, NULL, 'A' },
+ { "Bytes", no_argument, NULL, 'B'},
{ "debug", optional_argument, NULL, 'd' },
{ "display-activity", no_argument, NULL, 'D' },
{ "empty", no_argument, NULL, 'e' },
@@ -1341,21 +1360,21 @@ struct option opts[] = {
{ "help", no_argument, NULL, 'h' },
{ "inverted", no_argument, NULL, 'i'},
{ "slabs", no_argument, NULL, 'l' },
+ { "Loss", no_argument, NULL, 'L'},
{ "numa", no_argument, NULL, 'n' },
+ { "lines", required_argument, NULL, 'N'},
{ "ops", no_argument, NULL, 'o' },
- { "shrink", no_argument, NULL, 's' },
+ { "partial", no_argument, NULL, 'p'},
{ "report", no_argument, NULL, 'r' },
+ { "shrink", no_argument, NULL, 's' },
{ "Size", no_argument, NULL, 'S'},
{ "tracking", no_argument, NULL, 't'},
{ "Totals", no_argument, NULL, 'T'},
+ { "Unreclaim", no_argument, NULL, 'U'},
{ "validate", no_argument, NULL, 'v' },
+ { "Xtotals", no_argument, NULL, 'X'},
{ "zero", no_argument, NULL, 'z' },
{ "1ref", no_argument, NULL, '1'},
- { "lines", required_argument, NULL, 'N'},
- { "Loss", no_argument, NULL, 'L'},
- { "Xtotals", no_argument, NULL, 'X'},
- { "Bytes", no_argument, NULL, 'B'},
- { "Unreclaim", no_argument, NULL, 'U'},
{ NULL, 0, NULL, 0 }
};
@@ -1367,18 +1386,18 @@ int main(int argc, char *argv[])
page_size = getpagesize();
- while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXBU",
+ while ((c = getopt_long(argc, argv, "aABd::DefhilLnN:oPrsStTUvXz1",
opts, NULL)) != -1)
switch (c) {
- case '1':
- show_single_ref = 1;
- break;
case 'a':
show_alias = 1;
break;
case 'A':
sort_active = 1;
break;
+ case 'B':
+ show_bytes = 1;
+ break;
case 'd':
set_debug = 1;
if (!debug_opt_scan(optarg))
@@ -1399,45 +1418,48 @@ int main(int argc, char *argv[])
case 'i':
show_inverted = 1;
break;
+ case 'l':
+ show_slab = 1;
+ break;
+ case 'L':
+ sort_loss = 1;
+ break;
case 'n':
show_numa = 1;
break;
+ case 'N':
+ if (optarg) {
+ output_lines = atoi(optarg);
+ if (output_lines < 1)
+ output_lines = 1;
+ }
+ break;
case 'o':
show_ops = 1;
break;
case 'r':
show_report = 1;
break;
+ case 'P':
+ sort_partial = 1;
+ break;
case 's':
shrink = 1;
break;
- case 'l':
- show_slab = 1;
+ case 'S':
+ sort_size = 1;
break;
case 't':
show_track = 1;
break;
- case 'v':
- validate = 1;
- break;
- case 'z':
- skip_zero = 0;
- break;
case 'T':
show_totals = 1;
break;
- case 'S':
- sort_size = 1;
- break;
- case 'N':
- if (optarg) {
- output_lines = atoi(optarg);
- if (output_lines < 1)
- output_lines = 1;
- }
+ case 'U':
+ unreclaim_only = 1;
break;
- case 'L':
- sort_loss = 1;
+ case 'v':
+ validate = 1;
break;
case 'X':
if (output_lines == -1)
@@ -1445,11 +1467,11 @@ int main(int argc, char *argv[])
extended_totals = 1;
show_bytes = 1;
break;
- case 'B':
- show_bytes = 1;
+ case 'z':
+ skip_zero = 0;
break;
- case 'U':
- unreclaim_only = 1;
+ case '1':
+ show_single_ref = 1;
break;
default:
fatal("%s: Invalid option '%c'\n", argv[0], optopt);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 198e5171e1f7..38b4c910b6c3 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -129,7 +129,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < max) {
- page = (void *)__get_free_page(PGALLOC_GFP);
+ page = (void *)__get_free_page(GFP_PGTABLE_USER);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;
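GFP_PGTABLE_USER comes from the asm-generic page-table allocation cleanup earlier in this series and replaces the per-arch PGALLOC_GFP masks; the definitions are assumed to be roughly:

	#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
	#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

so the stage-2 page-table pages stay zero-initialised and the user variant is additionally charged to the kmem cgroup via __GFP_ACCOUNT.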