Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c      |  5
-rw-r--r--  mm/debug.c            | 28
-rw-r--r--  mm/hwpoison-inject.c  |  2
-rw-r--r--  mm/kmemleak.c         |  2
-rw-r--r--  mm/ksm.c              |  9
-rw-r--r--  mm/madvise.c          |  2
-rw-r--r--  mm/memcontrol.c       |  2
-rw-r--r--  mm/memory-failure.c   | 48
-rw-r--r--  mm/memory.c           | 10
-rw-r--r--  mm/mlock.c            |  2
-rw-r--r--  mm/mprotect.c         |  6
-rw-r--r--  mm/page_alloc.c       |  2
-rw-r--r--  mm/page_io.c          |  4
-rw-r--r--  mm/page_owner.c       |  1
-rw-r--r--  mm/page_vma_mapped.c  | 66
-rw-r--r--  mm/sparse.c           |  2
-rw-r--r--  mm/swapfile.c         |  2
-rw-r--r--  mm/vmscan.c           |  3
-rw-r--r--  mm/zsmalloc.c         |  1
19 files changed, 102 insertions(+), 95 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 84b2dc76f140..b5f940ce0143 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -882,13 +882,10 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
-	if (bdi_debug_register(bdi, dev_name(dev))) {
-		device_destroy(bdi_class, dev->devt);
-		return -ENOMEM;
-	}
 	cgwb_bdi_register(bdi);
 	bdi->dev = dev;
 
+	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(WB_registered, &bdi->wb.state);
 
 	spin_lock_bh(&bdi_lock);
diff --git a/mm/debug.c b/mm/debug.c
index d947f3e03b0d..56e2d9125ea5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason)
 	 */
 	int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
-	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
+	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
 		  page, page_ref_count(page), mapcount,
 		  page->mapping, page_to_pgoff(page));
 	if (PageCompound(page))
@@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason)
 
 #ifdef CONFIG_MEMCG
 	if (page->mem_cgroup)
-		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
+		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
 
@@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page);
 
 void dump_vma(const struct vm_area_struct *vma)
 {
-	pr_emerg("vma %p start %p end %p\n"
-		"next %p prev %p mm %p\n"
-		"prot %lx anon_vma %p vm_ops %p\n"
-		"pgoff %lx file %p private_data %p\n"
+	pr_emerg("vma %px start %px end %px\n"
+		"next %px prev %px mm %px\n"
+		"prot %lx anon_vma %px vm_ops %px\n"
+		"pgoff %lx file %px private_data %px\n"
 		"flags: %#lx(%pGv)\n",
 		vma, (void *)vma->vm_start, (void *)vma->vm_end,
 		vma->vm_next, vma->vm_prev, vma->vm_mm,
@@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+	pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
 #ifdef CONFIG_MMU
-		"get_unmapped_area %p\n"
+		"get_unmapped_area %px\n"
 #endif
 		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
-		"pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
+		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
 		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
 		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
 		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
 		"start_brk %lx brk %lx start_stack %lx\n"
 		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
-		"binfmt %p flags %lx core_state %p\n"
+		"binfmt %px flags %lx core_state %px\n"
 #ifdef CONFIG_AIO
-		"ioctx_table %p\n"
+		"ioctx_table %px\n"
 #endif
 #ifdef CONFIG_MEMCG
-		"owner %p "
+		"owner %px "
 #endif
-		"exe_file %p\n"
+		"exe_file %px\n"
 #ifdef CONFIG_MMU_NOTIFIER
-		"mmu_notifier_mm %p\n"
+		"mmu_notifier_mm %px\n"
 #endif
 #ifdef CONFIG_NUMA_BALANCING
 		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
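The mm/debug.c hunks above switch the dump helpers from %p to %px. For context (a sketch, not part of the patch): plain %p now prints a hashed value, so it no longer identifies the object being dumped, while %px deliberately bypasses the hashing and should be confined to debug output like this. A minimal illustration, where example_dump() is a hypothetical caller:

	#include <linux/printk.h>

	/* Hypothetical helper contrasting the two pointer format specifiers. */
	static void example_dump(const void *ptr)
	{
		pr_info("hashed:   %p\n", ptr);		/* obfuscated by default */
		pr_info("unhashed: %px\n", ptr);	/* raw kernel address */
	}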
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 356df057a2a8..b6ac70616c32 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -52,7 +52,7 @@ static int hwpoison_inject(void *data, u64 val)
 
 inject:
 	pr_info("Injecting memory failure at pfn %#lx\n", pfn);
-	return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+	return memory_failure(pfn, MF_COUNT_INCREASED);
 put_out:
 	put_hwpoison_page(p);
 	return 0;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d73c14294f3a..f656ca27f6c2 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -127,7 +127,7 @@
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-				 __GFP_NOWARN)
+				 __GFP_NOWARN | __GFP_NOFAIL)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -675,15 +675,8 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
 	expected_mapping = (void *)((unsigned long)stable_node |
 				    PAGE_MAPPING_KSM);
again:
-	kpfn = READ_ONCE(stable_node->kpfn);
+	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
 	page = pfn_to_page(kpfn);
-
-	/*
-	 * page is computed from kpfn, so on most architectures reading
-	 * page->mapping is naturally ordered after reading node->kpfn,
-	 * but on Alpha we need to be more careful.
-	 */
-	smp_read_barrier_depends();
 	if (READ_ONCE(page->mapping) != expected_mapping)
 		goto stale;
diff --git a/mm/madvise.c b/mm/madvise.c
index 751e97aa2210..4d3c922ea1a1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -661,7 +661,7 @@ static int madvise_inject_error(int behavior,
 
 		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
 				page_to_pfn(page), start);
 
-		ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
+		ret = memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
 		if (ret)
 			return ret;
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac2ffd5e02b9..9011997d8a5c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3777,7 +3777,7 @@ static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
 	struct mem_cgroup_event *event =
 		container_of(wait, struct mem_cgroup_event, wait);
 	struct mem_cgroup *memcg = event->memcg;
-	unsigned long flags = (unsigned long)key;
+	__poll_t flags = key_to_poll(key);
 
 	if (flags & POLLHUP) {
 		/*
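The mm/memcontrol.c hunk belongs to the tree-wide __poll_t annotation work: the opaque wake-up @key argument carries the poll mask, and key_to_poll() replaces the open-coded cast to unsigned long. A minimal sketch of the pattern, assuming a hypothetical wait-queue callback my_wake() and teardown helper:

	#include <linux/wait.h>
	#include <linux/poll.h>

	static int my_wake(wait_queue_entry_t *wait, unsigned int mode,
			   int sync, void *key)
	{
		__poll_t flags = key_to_poll(key);	/* typed poll mask */

		if (flags & POLLHUP)
			my_teardown();			/* hypothetical helper */
		return 0;
	}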
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4acdf393a801..4b80ccee4535 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -178,25 +178,19 @@ EXPORT_SYMBOL_GPL(hwpoison_filter);
  * ``action optional'' if they are not immediately affected by the error
  * ``action required'' if error happened in current execution context
  */
-static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+static int kill_proc(struct task_struct *t, unsigned long addr,
 		     unsigned long pfn, struct page *page, int flags)
 {
-	struct siginfo si;
+	short addr_lsb;
 	int ret;
 
 	pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
 		pfn, t->comm, t->pid);
-	si.si_signo = SIGBUS;
-	si.si_errno = 0;
-	si.si_addr = (void *)addr;
-#ifdef __ARCH_SI_TRAPNO
-	si.si_trapno = trapno;
-#endif
-	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+	addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
 
 	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
-		si.si_code = BUS_MCEERR_AR;
-		ret = force_sig_info(SIGBUS, &si, current);
+		ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
+				       addr_lsb, current);
 	} else {
 		/*
 		 * Don't use force here, it's convenient if the signal
@@ -204,8 +198,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
 		 * This could cause a loop when the user sets SIGBUS
 		 * to SIG_IGN, but hopefully no one will do that?
 		 */
-		si.si_code = BUS_MCEERR_AO;
-		ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
+		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
+				      addr_lsb, t);	/* synchronous? */
 	}
 	if (ret < 0)
 		pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
@@ -323,7 +317,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill,
 		       bool fail, struct page *page, unsigned long pfn,
 		       int flags)
 {
@@ -348,7 +342,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 			 * check for that, but we need to tell the
 			 * process anyways.
 			 */
-			else if (kill_proc(tk->tsk, tk->addr, trapno,
+			else if (kill_proc(tk->tsk, tk->addr,
 					   pfn, page, flags) < 0)
 				pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
 					pfn, tk->tsk->comm, tk->tsk->pid);
@@ -927,7 +921,7 @@ EXPORT_SYMBOL_GPL(get_hwpoison_page);
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
-				   int trapno, int flags, struct page **hpagep)
+				   int flags, struct page **hpagep)
 {
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 	struct address_space *mapping;
@@ -1017,7 +1011,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * any accesses to the poisoned memory.
 	 */
 	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
-	kill_procs(&tokill, forcekill, trapno, !unmap_success, p, pfn, flags);
+	kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
 
 	return unmap_success;
 }
@@ -1045,7 +1039,7 @@ static int identify_page_state(unsigned long pfn, struct page *p,
 	return page_action(ps, p, pfn);
 }
 
-static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
+static int memory_failure_hugetlb(unsigned long pfn, int flags)
 {
 	struct page *p = pfn_to_page(pfn);
 	struct page *head = compound_head(p);
@@ -1090,7 +1084,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
 		return 0;
 	}
 
-	if (!hwpoison_user_mappings(p, pfn, trapno, flags, &head)) {
+	if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 		res = -EBUSY;
 		goto out;
@@ -1105,7 +1099,6 @@ out:
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
  * @flags: fine tune action taken
  *
  * This function is called by the low level machine check code
@@ -1120,7 +1113,7 @@ out:
  * Must run in process context (e.g. a work queue) with interrupts
  * enabled and no spinlocks hold.
  */
-int memory_failure(unsigned long pfn, int trapno, int flags)
+int memory_failure(unsigned long pfn, int flags)
 {
 	struct page *p;
 	struct page *hpage;
@@ -1129,7 +1122,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	unsigned long page_flags;
 
 	if (!sysctl_memory_failure_recovery)
-		panic("Memory failure from trap %d on page %lx", trapno, pfn);
+		panic("Memory failure on page %lx", pfn);
 
 	if (!pfn_valid(pfn)) {
 		pr_err("Memory failure: %#lx: memory outside kernel control\n",
@@ -1139,7 +1132,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 
 	p = pfn_to_page(pfn);
 	if (PageHuge(p))
-		return memory_failure_hugetlb(pfn, trapno, flags);
+		return memory_failure_hugetlb(pfn, flags);
 	if (TestSetPageHWPoison(p)) {
 		pr_err("Memory failure: %#lx: already hardware poisoned\n",
 			pfn);
@@ -1268,7 +1261,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * When the raw error page is thp tail page, hpage points to the raw
 	 * page after thp split.
 	 */
-	if (!hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)) {
+	if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 		res = -EBUSY;
 		goto out;
@@ -1296,7 +1289,6 @@ EXPORT_SYMBOL_GPL(memory_failure);
 
 struct memory_failure_entry {
 	unsigned long pfn;
-	int trapno;
 	int flags;
 };
 
@@ -1312,7 +1304,6 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
 /**
  * memory_failure_queue - Schedule handling memory failure of a page.
  * @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
  * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
@@ -1326,13 +1317,12 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
 *
 * Can run in IRQ context.
 */
-void memory_failure_queue(unsigned long pfn, int trapno, int flags)
+void memory_failure_queue(unsigned long pfn, int flags)
 {
 	struct memory_failure_cpu *mf_cpu;
 	unsigned long proc_flags;
 	struct memory_failure_entry entry = {
 		.pfn =		pfn,
-		.trapno =	trapno,
 		.flags =	flags,
 	};
 
@@ -1365,7 +1355,7 @@ static void memory_failure_work_func(struct work_struct *work)
 		if (entry.flags & MF_SOFT_OFFLINE)
 			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
 		else
-			memory_failure(entry.pfn, entry.trapno, entry.flags);
+			memory_failure(entry.pfn, entry.flags);
 	}
 }
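The memory-failure changes above (together with the mm/hwpoison-inject.c and mm/madvise.c callers) drop the never-really-used trapno argument and let force_sig_mceerr()/send_sig_mceerr() build the SIGBUS siginfo. A hedged sketch of what a caller looks like after the series, where handle_hw_error() and the surrounding glue are hypothetical:

	/* Hypothetical glue translating a hardware error record to mm calls. */
	static void handle_hw_error(u64 phys_addr)
	{
		unsigned long pfn = PHYS_PFN(phys_addr);

		/* From IRQ/atomic context, defer the heavy lifting: */
		memory_failure_queue(pfn, 0);

		/* From process context, memory_failure(pfn, 0) can be called directly. */
	}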
diff --git a/mm/memory.c b/mm/memory.c
index ca5674cbaff2..793004608332 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2857,8 +2857,11 @@ int do_swap_page(struct vm_fault *vmf)
 	int ret = 0;
 	bool vma_readahead = swap_use_vma_readahead();
 
-	if (vma_readahead)
+	if (vma_readahead) {
 		page = swap_readahead_detect(vmf, &swap_ra);
+		swapcache = page;
+	}
+
 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
 		if (page)
 			put_page(page);
@@ -2889,9 +2892,12 @@ int do_swap_page(struct vm_fault *vmf)
 
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
-	if (!page)
+	if (!page) {
 		page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
 					 vmf->address);
+		swapcache = page;
+	}
+
 	if (!page) {
 		struct swap_info_struct *si = swp_swap_info(entry);
diff --git a/mm/mlock.c b/mm/mlock.c
index 30472d438794..f7f54fd2e13f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -779,7 +779,7 @@ static int apply_mlockall_flags(int flags)
 		/* Ignore errors */
 		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
 
-		cond_resched_rcu_qs();
+		cond_resched();
 	}
 out:
 	return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ec39f730a0bf..58b629bb70de 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		next = pmd_addr_end(addr, end);
 		if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
 				&& pmd_none_or_clear_bad(pmd))
-			continue;
+			goto next;
 
 		/* invoke the mmu notifier if the pmd is populated */
 		if (!mni_start) {
@@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 				}
 
 				/* huge pmd was handled */
-				continue;
+				goto next;
 			}
 		}
 		/* fall through, the trans huge pmd just split */
@@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
 				 dirty_accountable, prot_numa);
 		pages += this_pages;
+next:
+		cond_resched();
 	} while (pmd++, addr = next, addr != end);
 
 	if (mni_start)
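The mm/mprotect.c hunks turn the early "continue" statements into "goto next" so that every pmd iteration, including the skipped ones, passes a cond_resched() point; otherwise a protection change over a huge range could run for a long time without rescheduling. A simplified sketch of the resulting loop shape, where skip_this_pmd() and work_on_pmd() are hypothetical stand-ins:

	do {
		next = pmd_addr_end(addr, end);
		if (skip_this_pmd(pmd))
			goto next;	/* was "continue", which bypassed the resched */
		work_on_pmd(vma, pmd, addr, next);
	next:
		cond_resched();		/* now reached on every iteration */
	} while (pmd++, addr = next, addr != end);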
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7e5e775e97f4..76c9688b6a0a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6260,6 +6260,8 @@ void __paginginit zero_resv_unavail(void)
 	pgcnt = 0;
 	for_each_resv_unavail_range(i, &start, &end) {
 		for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+			if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+				continue;
 			mm_zero_struct_page(pfn_to_page(pfn));
 			pgcnt++;
 		}
diff --git a/mm/page_io.c b/mm/page_io.c
index e93f1a4cacd7..b41cf9644585 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -50,7 +50,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
 void end_swap_bio_write(struct bio *bio)
 {
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct page *page = bio_first_page_all(bio);
 
 	if (bio->bi_status) {
 		SetPageError(page);
@@ -122,7 +122,7 @@ static void swap_slot_free_notify(struct page *page)
 
 static void end_swap_bio_read(struct bio *bio)
 {
-	struct page *page = bio->bi_io_vec[0].bv_page;
+	struct page *page = bio_first_page_all(bio);
 	struct task_struct *waiter = bio->bi_private;
 
 	if (bio->bi_status) {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8592543a0f15..270a8219ccd0 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -616,7 +616,6 @@ static void init_early_allocated_pages(void)
 {
 	pg_data_t *pgdat;
 
-	drain_all_pages(NULL);
 	for_each_online_pgdat(pgdat)
 		init_zones_in_node(pgdat);
 }
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index d22b84310f6d..ae3c2a35d61b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	return true;
 }
 
+static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
+{
+	unsigned long hpage_pfn = page_to_pfn(hpage);
+
+	/* THP can be referenced by any subpage */
+	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
+}
+
+/**
+ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pte() has to validate this.
+ *
+ * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
+ * page.
+ *
+ * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
+ * entry that points to @pvmw->page or any subpage in case of THP.
+ *
+ * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
+ * @pvmw->page or any subpage in case of THP.
+ *
+ * Otherwise, return false.
+ *
+ */
 static bool check_pte(struct page_vma_mapped_walk *pvmw)
 {
+	unsigned long pfn;
+
 	if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
 		swp_entry_t entry;
 		if (!is_swap_pte(*pvmw->pte))
 			return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 
 		if (!is_migration_entry(entry))
 			return false;
-		if (migration_entry_to_page(entry) - pvmw->page >=
-				hpage_nr_pages(pvmw->page)) {
-			return false;
-		}
-		if (migration_entry_to_page(entry) < pvmw->page)
-			return false;
-#else
-		WARN_ON_ONCE(1);
-#endif
-	} else {
-		if (is_swap_pte(*pvmw->pte)) {
-			swp_entry_t entry;
-
-			entry = pte_to_swp_entry(*pvmw->pte);
-			if (is_device_private_entry(entry) &&
-			    device_private_entry_to_page(entry) == pvmw->page)
-				return true;
-		}
+		pfn = migration_entry_to_pfn(entry);
+	} else if (is_swap_pte(*pvmw->pte)) {
+		swp_entry_t entry;
 
-		if (!pte_present(*pvmw->pte))
+		/* Handle un-addressable ZONE_DEVICE memory */
+		entry = pte_to_swp_entry(*pvmw->pte);
+		if (!is_device_private_entry(entry))
 			return false;
 
-		/* THP can be referenced by any subpage */
-		if (pte_page(*pvmw->pte) - pvmw->page >=
-			hpage_nr_pages(pvmw->page)) {
-			return false;
-		}
-		if (pte_page(*pvmw->pte) < pvmw->page)
+		pfn = device_private_entry_to_pfn(entry);
+	} else {
+		if (!pte_present(*pvmw->pte))
 			return false;
+
+		pfn = pte_pfn(*pvmw->pte);
 	}
 
-	return true;
+	return pfn_in_hpage(pvmw->page, pfn);
 }
 
 /**
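The mm/page_vma_mapped.c rewrite replaces struct page pointer arithmetic, which is not meaningful across a compound page whose struct pages need not be virtually contiguous under every memory model, with a pfn range check. A standalone restatement of the predicate for illustration, with the kernel types elided:

	static bool pfn_in_range(unsigned long hpage_pfn, unsigned long nr_pages,
				 unsigned long pfn)
	{
		/* The ">=" test must come first: pfn - hpage_pfn is unsigned. */
		return pfn >= hpage_pfn && pfn - hpage_pfn < nr_pages;
	}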
diff --git a/mm/sparse.c b/mm/sparse.c
index 7a5dacaa06e3..2609aba121e8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 	if (unlikely(!mem_section)) {
 		unsigned long size, align;
 
-		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
 		align = 1 << (INTERNODE_CACHE_SHIFT);
 		mem_section = memblock_virt_alloc(size, align);
 	}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3074b02eaa09..42fe5653814a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2697,7 +2697,7 @@ out:
 }
 
 #ifdef CONFIG_PROC_FS
-static unsigned swaps_poll(struct file *file, poll_table *wait)
+static __poll_t swaps_poll(struct file *file, poll_table *wait)
 {
 	struct seq_file *seq = file->private_data;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c02c850ea349..47d5ced51f2d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker);
 */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+	if (!shrinker->nr_deferred)
+		return;
 	down_write(&shrinker_rwsem);
 	list_del(&shrinker->list);
 	up_write(&shrinker_rwsem);
 	kfree(shrinker->nr_deferred);
+	shrinker->nr_deferred = NULL;
 }
 EXPORT_SYMBOL(unregister_shrinker);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 685049a9048d..683c0651098c 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
 #include <linux/mount.h>
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
+#include <linux/fs.h>
 
 #define ZSPAGE_MAGIC	0x58
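The mm/vmscan.c hunk above uses shrinker->nr_deferred as a registration marker: unregister_shrinker() now returns early when registration failed or never happened, and clears the marker so a double unregister is harmless as well. A hedged sketch of the caller pattern this enables, with hypothetical my_* callbacks:

	#include <linux/shrinker.h>

	static struct shrinker my_shrinker = {
		.count_objects	= my_count,	/* hypothetical callbacks */
		.scan_objects	= my_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	static int my_init(void)
	{
		return register_shrinker(&my_shrinker);	/* may fail with -ENOMEM */
	}

	static void my_exit(void)
	{
		/* Safe even if register_shrinker() failed or was never called. */
		unregister_shrinker(&my_shrinker);
	}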