| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
| * Copyright (C) 1993 Linus Torvalds |
| * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 |
| * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 |
| * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 |
| * Numa awareness, Christoph Lameter, SGI, June 2005 |
| * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019 |
| */ |
| |
| #include <linux/vmalloc.h> |
| #include <linux/mm.h> |
| #include <linux/module.h> |
| #include <linux/highmem.h> |
| #include <linux/sched/signal.h> |
| #include <linux/slab.h> |
| #include <linux/spinlock.h> |
| #include <linux/interrupt.h> |
| #include <linux/proc_fs.h> |
| #include <linux/seq_file.h> |
| #include <linux/set_memory.h> |
| #include <linux/debugobjects.h> |
| #include <linux/kallsyms.h> |
| #include <linux/list.h> |
| #include <linux/notifier.h> |
| #include <linux/rbtree.h> |
| #include <linux/xarray.h> |
| #include <linux/io.h> |
| #include <linux/rcupdate.h> |
| #include <linux/pfn.h> |
| #include <linux/kmemleak.h> |
| #include <linux/atomic.h> |
| #include <linux/compiler.h> |
| #include <linux/memcontrol.h> |
| #include <linux/llist.h> |
| #include <linux/uio.h> |
| #include <linux/bitops.h> |
| #include <linux/rbtree_augmented.h> |
| #include <linux/overflow.h> |
| #include <linux/pgtable.h> |
| #include <linux/hugetlb.h> |
| #include <linux/sched/mm.h> |
| #include <asm/tlbflush.h> |
| #include <asm/shmparam.h> |
| #include <linux/page_owner.h> |
| |
| #define CREATE_TRACE_POINTS |
| #include <trace/events/vmalloc.h> |
| |
| #include "internal.h" |
| #include "pgalloc-track.h" |
| |
| #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP |
| static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1; |
| |
| static int __init set_nohugeiomap(char *str) |
| { |
| ioremap_max_page_shift = PAGE_SHIFT; |
| return 0; |
| } |
| early_param("nohugeiomap", set_nohugeiomap); |
| #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ |
| static const unsigned int ioremap_max_page_shift = PAGE_SHIFT; |
| #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ |
| |
| #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC |
| static bool __ro_after_init vmap_allow_huge = true; |
| |
| static int __init set_nohugevmalloc(char *str) |
| { |
| vmap_allow_huge = false; |
| return 0; |
| } |
| early_param("nohugevmalloc", set_nohugevmalloc); |
| #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ |
| static const bool vmap_allow_huge = false; |
| #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ |
| |
| bool is_vmalloc_addr(const void *x) |
| { |
| unsigned long addr = (unsigned long)kasan_reset_tag(x); |
| |
| return addr >= VMALLOC_START && addr < VMALLOC_END; |
| } |
| EXPORT_SYMBOL(is_vmalloc_addr); |
| |
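| /* |
| * Deferred vfree handling: requests that cannot be processed in the |
| * calling context are queued on a per-CPU llist and freed later from |
| * a workqueue. |
| */ |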
| struct vfree_deferred { |
| struct llist_head list; |
| struct work_struct wq; |
| }; |
| static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); |
| |
| /*** Page table manipulation functions ***/ |
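| /* |
| * Map the range [addr, end) at the PTE level to the contiguous physical |
| * range starting at phys_addr. With CONFIG_HUGETLB_PAGE, |
| * arch_vmap_pte_range_map_size() may allow several base pages to be |
| * covered by one larger PTE-level mapping. |
| */ |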
| static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift, pgtbl_mod_mask *mask) |
| { |
| pte_t *pte; |
| u64 pfn; |
| struct page *page; |
| unsigned long size = PAGE_SIZE; |
| |
| pfn = phys_addr >> PAGE_SHIFT; |
| pte = pte_alloc_kernel_track(pmd, addr, mask); |
| if (!pte) |
| return -ENOMEM; |
| do { |
| if (!pte_none(ptep_get(pte))) { |
| if (pfn_valid(pfn)) { |
| page = pfn_to_page(pfn); |
| dump_page(page, "remapping already mapped page"); |
| } |
| BUG(); |
| } |
| |
| #ifdef CONFIG_HUGETLB_PAGE |
| size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); |
| if (size != PAGE_SIZE) { |
| pte_t entry = pfn_pte(pfn, prot); |
| |
| entry = arch_make_huge_pte(entry, ilog2(size), 0); |
| set_huge_pte_at(&init_mm, addr, pte, entry, size); |
| pfn += PFN_DOWN(size); |
| continue; |
| } |
| #endif |
| set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); |
| pfn++; |
| } while (pte += PFN_DOWN(size), addr += size, addr != end); |
| *mask |= PGTBL_PTE_MODIFIED; |
| return 0; |
| } |
| |
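| /* |
| * Try to install a single huge mapping at the PMD level for [addr, end). |
| * Returns non-zero on success; 0 means the caller has to fall back to |
| * PTE-level mappings. This requires architecture support, a range of |
| * exactly PMD_SIZE and PMD-aligned virtual and physical addresses. |
| */ |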
| static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift) |
| { |
| if (max_page_shift < PMD_SHIFT) |
| return 0; |
| |
| if (!arch_vmap_pmd_supported(prot)) |
| return 0; |
| |
| if ((end - addr) != PMD_SIZE) |
| return 0; |
| |
| if (!IS_ALIGNED(addr, PMD_SIZE)) |
| return 0; |
| |
| if (!IS_ALIGNED(phys_addr, PMD_SIZE)) |
| return 0; |
| |
| if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) |
| return 0; |
| |
| return pmd_set_huge(pmd, phys_addr, prot); |
| } |
| |
| static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift, pgtbl_mod_mask *mask) |
| { |
| pmd_t *pmd; |
| unsigned long next; |
| |
| pmd = pmd_alloc_track(&init_mm, pud, addr, mask); |
| if (!pmd) |
| return -ENOMEM; |
| do { |
| next = pmd_addr_end(addr, end); |
| |
| if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, |
| max_page_shift)) { |
| *mask |= PGTBL_PMD_MODIFIED; |
| continue; |
| } |
| |
| if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) |
| return -ENOMEM; |
| } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); |
| return 0; |
| } |
| |
| static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift) |
| { |
| if (max_page_shift < PUD_SHIFT) |
| return 0; |
| |
| if (!arch_vmap_pud_supported(prot)) |
| return 0; |
| |
| if ((end - addr) != PUD_SIZE) |
| return 0; |
| |
| if (!IS_ALIGNED(addr, PUD_SIZE)) |
| return 0; |
| |
| if (!IS_ALIGNED(phys_addr, PUD_SIZE)) |
| return 0; |
| |
| if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) |
| return 0; |
| |
| return pud_set_huge(pud, phys_addr, prot); |
| } |
| |
| static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift, pgtbl_mod_mask *mask) |
| { |
| pud_t *pud; |
| unsigned long next; |
| |
| pud = pud_alloc_track(&init_mm, p4d, addr, mask); |
| if (!pud) |
| return -ENOMEM; |
| do { |
| next = pud_addr_end(addr, end); |
| |
| if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, |
| max_page_shift)) { |
| *mask |= PGTBL_PUD_MODIFIED; |
| continue; |
| } |
| |
| if (vmap_pmd_range(pud, addr, next, phys_addr, prot, |
| max_page_shift, mask)) |
| return -ENOMEM; |
| } while (pud++, phys_addr += (next - addr), addr = next, addr != end); |
| return 0; |
| } |
| |
| static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift) |
| { |
| if (max_page_shift < P4D_SHIFT) |
| return 0; |
| |
| if (!arch_vmap_p4d_supported(prot)) |
| return 0; |
| |
| if ((end - addr) != P4D_SIZE) |
| return 0; |
| |
| if (!IS_ALIGNED(addr, P4D_SIZE)) |
| return 0; |
| |
| if (!IS_ALIGNED(phys_addr, P4D_SIZE)) |
| return 0; |
| |
| if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) |
| return 0; |
| |
| return p4d_set_huge(p4d, phys_addr, prot); |
| } |
| |
| static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift, pgtbl_mod_mask *mask) |
| { |
| p4d_t *p4d; |
| unsigned long next; |
| |
| p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); |
| if (!p4d) |
| return -ENOMEM; |
| do { |
| next = p4d_addr_end(addr, end); |
| |
| if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, |
| max_page_shift)) { |
| *mask |= PGTBL_P4D_MODIFIED; |
| continue; |
| } |
| |
| if (vmap_pud_range(p4d, addr, next, phys_addr, prot, |
| max_page_shift, mask)) |
| return -ENOMEM; |
| } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); |
| return 0; |
| } |
| |
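| /* |
| * Populate the kernel page tables for [addr, end) with the contiguous |
| * physical range starting at phys_addr, using huge mappings up to |
| * max_page_shift where possible. Caches and TLBs are not flushed; that |
| * is left to the caller. |
| */ |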
| static int vmap_range_noflush(unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot, |
| unsigned int max_page_shift) |
| { |
| pgd_t *pgd; |
| unsigned long start; |
| unsigned long next; |
| int err; |
| pgtbl_mod_mask mask = 0; |
| |
| might_sleep(); |
| BUG_ON(addr >= end); |
| |
| start = addr; |
| pgd = pgd_offset_k(addr); |
| do { |
| next = pgd_addr_end(addr, end); |
| err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, |
| max_page_shift, &mask); |
| if (err) |
| break; |
| } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); |
| |
| if (mask & ARCH_PAGE_TABLE_SYNC_MASK) |
| arch_sync_kernel_mappings(start, end); |
| |
| return err; |
| } |
| |
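| /* |
| * Flushing variant of vmap_range_noflush() with NX enforced on the |
| * protection bits; this is the helper ioremap_page_range() builds on. |
| */ |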
| int vmap_page_range(unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot) |
| { |
| int err; |
| |
| err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), |
| ioremap_max_page_shift); |
| flush_cache_vmap(addr, end); |
| if (!err) |
| err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, |
| ioremap_max_page_shift); |
| return err; |
| } |
| |
| int ioremap_page_range(unsigned long addr, unsigned long end, |
| phys_addr_t phys_addr, pgprot_t prot) |
| { |
| struct vm_struct *area; |
| |
| area = find_vm_area((void *)addr); |
| if (!area || !(area->flags & VM_IOREMAP)) { |
| WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr); |
| return -EINVAL; |
| } |
| if (addr != (unsigned long)area->addr || |
| (void *)end != area->addr + get_vm_area_size(area)) { |
| WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n", |
| addr, end, (long)area->addr, |
| (long)area->addr + get_vm_area_size(area)); |
| return -ERANGE; |
| } |
| return vmap_page_range(addr, end, phys_addr, prot); |
| } |
| |
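| /* |
| * Clear all PTEs in [addr, end). An entry that is neither none nor |
| * present is unexpected for kernel mappings and triggers the WARN below. |
| */ |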
| static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
| pgtbl_mod_mask *mask) |
| { |
| pte_t *pte; |
| |
| pte = pte_offset_kernel(pmd, addr); |
| do { |
| pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); |
| WARN_ON(!pte_none(ptent) && !pte_present(ptent)); |
| } while (pte++, addr += PAGE_SIZE, addr != end); |
| *mask |= PGTBL_PTE_MODIFIED; |
| } |
| |
| static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, |
| pgtbl_mod_mask *mask) |
| { |
| pmd_t *pmd; |
| unsigned long next; |
| int cleared; |
| |
| pmd = pmd_offset(pud, addr); |
| do { |
| next = pmd_addr_end(addr, end); |
| |
| cleared = pmd_clear_huge(pmd); |
| if (cleared || pmd_bad(*pmd)) |
| *mask |= PGTBL_PMD_MODIFIED; |
| |
| if (cleared) |
| continue; |
| if (pmd_none_or_clear_bad(pmd)) |
| continue; |
| vunmap_pte_range(pmd, addr, next, mask); |
| |
| cond_resched(); |
| } while (pmd++, addr = next, addr != end); |
| } |
| |
| static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, |
| pgtbl_mod_mask *mask) |
| { |
| pud_t *pud; |
| unsigned long next; |
| int cleared; |
| |
| pud = pud_offset(p4d, addr); |
| do { |
| next = pud_addr_end(addr, end); |
| |
| cleared = pud_clear_huge(pud); |
| if (cleared || pud_bad(*pud)) |
| *mask |= PGTBL_PUD_MODIFIED; |
| |
| if (cleared) |
| continue; |
| if (pud_none_or_clear_bad(pud)) |
| continue; |
| vunmap_pmd_range(pud, addr, next, mask); |
| } while (pud++, addr = next, addr != end); |
| } |
| |
| static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, |
| pgtbl_mod_mask *mask) |
| { |
| p4d_t *p4d; |
| unsigned long next; |
| |
| p4d = p4d_offset(pgd, addr); |
| do { |
| next = p4d_addr_end(addr, end); |
| |
| p4d_clear_huge(p4d); |
| if (p4d_bad(*p4d)) |
| *mask |= PGTBL_P4D_MODIFIED; |
| |
| if (p4d_none_or_clear_bad(p4d)) |
| continue; |
| vunmap_pud_range(p4d, addr, next, mask); |
| } while (p4d++, addr = next, addr != end); |
| } |
| |
| /* |
| * vunmap_range_noflush is similar to vunmap_range, but does not |
| * flush caches or TLBs. |
| * |
| * The caller is responsible for calling flush_cache_vunmap() before calling |
| * this function, and flush_tlb_kernel_range() after it has returned |
| * successfully (and before the addresses are expected to cause a page fault |
| * or be re-mapped for something else, if TLB flushes are being delayed or |
| * coalesced). |
| * |
| * This is an internal function only. Do not use outside mm/. |
| */ |
| void __vunmap_range_noflush(unsigned long start, unsigned long end) |
| { |
| unsigned long next; |
| pgd_t *pgd; |
| unsigned long addr = start; |
| pgtbl_mod_mask mask = 0; |
| |
| BUG_ON(addr >= end); |
| pgd = pgd_offset_k(addr); |
| do { |
| next = pgd_addr_end(addr, end); |
| if (pgd_bad(*pgd)) |
| mask |= PGTBL_PGD_MODIFIED; |
| if (pgd_none_or_clear_bad(pgd)) |
| continue; |
| vunmap_p4d_range(pgd, addr, next, &mask); |
| } while (pgd++, addr = next, addr != end); |
| |
| if (mask & ARCH_PAGE_TABLE_SYNC_MASK) |
| arch_sync_kernel_mappings(start, end); |
| } |
| |
| void vunmap_range_noflush(unsigned long start, unsigned long end) |
| { |
| kmsan_vunmap_range_noflush(start, end); |
| __vunmap_range_noflush(start, end); |
| } |
| |
| /** |
| * vunmap_range - unmap kernel virtual addresses |
| * @addr: start of the VM area to unmap |
| * @end: end of the VM area to unmap (non-inclusive) |
| * |
| * Clears any present PTEs in the virtual address range, flushes TLBs and |
| * caches. Any subsequent access to the address before it has been re-mapped |
| * is a kernel bug. |
| */ |
| void vunmap_range(unsigned long addr, unsigned long end) |
| { |
| flush_cache_vunmap(addr, end); |
| vunmap_range_noflush(addr, end); |
| flush_tlb_kernel_range(addr, end); |
| } |
| |
| static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, |
| unsigned long end, pgprot_t prot, struct page **pages, int *nr, |
| pgtbl_mod_mask *mask) |
| { |
| pte_t *pte; |
| |
| /* |
| * nr is a running index into the array which helps higher level |
| * callers keep track of where we're up to. |
| */ |
| |
| pte = pte_alloc_kernel_track(pmd, addr, mask); |
| if (!pte) |
| return -ENOMEM; |
| do { |
| struct page *page = pages[*nr]; |
| |
| if (WARN_ON(!pte_none(ptep_get(pte)))) |
| return -EBUSY; |
| if (WARN_ON(!page)) |
| return -ENOMEM; |
| if (WARN_ON(!pfn_valid(page_to_pfn(page)))) |
| return -EINVAL; |
| |
| set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); |
| (*nr)++; |
| } while (pte++, addr += PAGE_SIZE, addr != end); |
| *mask |= PGTBL_PTE_MODIFIED; |
| return 0; |
| } |
| |
| static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, |
| unsigned long end, pgprot_t prot, struct page **pages, int *nr, |
| pgtbl_mod_mask *mask) |
| { |
| pmd_t *pmd; |
| unsigned long next; |
| |
| pmd = pmd_alloc_track(&init_mm, pud, addr, mask); |
| if (!pmd) |
| return -ENOMEM; |
| do { |
| next = pmd_addr_end(addr, end); |
| if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) |
| return -ENOMEM; |
| } while (pmd++, addr = next, addr != end); |
| return 0; |
| } |
| |
| static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, |
| unsigned long end, pgprot_t prot, struct page **pages, int *nr, |
| pgtbl_mod_mask *mask) |
| { |
| pud_t *pud; |
| unsigned long next; |
| |
| pud = pud_alloc_track(&init_mm, p4d, addr, mask); |
| if (!pud) |
| return -ENOMEM; |
| do { |
| next = pud_addr_end(addr, end); |
| if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) |
| return -ENOMEM; |
| } while (pud++, addr = next, addr != end); |
| return 0; |
| } |
| |
| static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, |
| unsigned long end, pgprot_t prot, struct page **pages, int *nr, |
| pgtbl_mod_mask *mask) |
| { |
| p4d_t *p4d; |
| unsigned long next; |
| |
| p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); |
| if (!p4d) |
| return -ENOMEM; |
| do { |
| next = p4d_addr_end(addr, end); |
| if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) |
| return -ENOMEM; |
| } while (p4d++, addr = next, addr != end); |
| return 0; |
| } |
| |
| static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, |
| pgprot_t prot, struct page **pages) |
| { |
| unsigned long start = addr; |
| pgd_t *pgd; |
| unsigned long next; |
| int err = 0; |
| int nr = 0; |
| pgtbl_mod_mask mask = 0; |
| |
| BUG_ON(addr >= end); |
| pgd = pgd_offset_k(addr); |
| do { |
| next = pgd_addr_end(addr, end); |
| if (pgd_bad(*pgd)) |
| mask |= PGTBL_PGD_MODIFIED; |
| err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); |
| if (err) |
| return err; |
| } while (pgd++, addr = next, addr != end); |
| |
| if (mask & ARCH_PAGE_TABLE_SYNC_MASK) |
| arch_sync_kernel_mappings(start, end); |
| |
| return 0; |
| } |
| |
| /* |
| * vmap_pages_range_noflush is similar to vmap_pages_range, but does not |
| * flush caches. |
| * |
| * The caller is responsible for calling flush_cache_vmap() after this |
| * function returns successfully and before the addresses are accessed. |
| * |
| * This is an internal function only. Do not use outside mm/. |
| */ |
| int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, |
| pgprot_t prot, struct page **pages, unsigned int page_shift) |
| { |
| unsigned int i, nr = (end - addr) >> PAGE_SHIFT; |
| |
| WARN_ON(page_shift < PAGE_SHIFT); |
| |
| if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || |
| page_shift == PAGE_SHIFT) |
| return vmap_small_pages_range_noflush(addr, end, prot, pages); |
| |
| for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { |
| int err; |
| |
| err = vmap_range_noflush(addr, addr + (1UL << page_shift), |
| page_to_phys(pages[i]), prot, |
| page_shift); |
| if (err) |
| return err; |
| |
| addr += 1UL << page_shift; |
| } |
| |
| return 0; |
| } |
| |
| int vmap_pages_range_noflush(unsigned long addr, unsigned long end, |
| pgprot_t prot, struct page **pages, unsigned int page_shift) |
| { |
| int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, |
| page_shift); |
| |
| if (ret) |
| return ret; |
| return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); |
| } |
| |
| /** |
| * vmap_pages_range - map pages to a kernel virtual address |
| * @addr: start of the VM area to map |
| * @end: end of the VM area to map (non-inclusive) |
| * @prot: page protection flags to use |
| * @pages: pages to map (always PAGE_SIZE pages) |
| * @page_shift: maximum shift that the pages may be mapped with, @pages must |
| * be aligned and contiguous up to at least this shift. |
| * |
| * RETURNS: |
| * 0 on success, -errno on failure. |
| */ |
| static int vmap_pages_range(unsigned long addr, unsigned long end, |
| pgprot_t prot, struct page **pages, unsigned int page_shift) |
| { |
| int err; |
| |
| err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); |
| flush_cache_vmap(addr, end); |
| return err; |
| } |
| |
| static int check_sparse_vm_area(struct vm_struct *area, unsigned long start, |
| unsigned long end) |
| { |
| might_sleep(); |
| if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS)) |
| return -EINVAL; |
| if (WARN_ON_ONCE(area->flags & VM_NO_GUARD)) |
| return -EINVAL; |
| if (WARN_ON_ONCE(!(area->flags & VM_SPARSE))) |
| return -EINVAL; |
| if ((end - start) >> PAGE_SHIFT > totalram_pages()) |
| return -E2BIG; |
| if (start < (unsigned long)area->addr || |
| (void *)end > area->addr + get_vm_area_size(area)) |
| return -ERANGE; |
| return 0; |
| } |
| |
| /** |
| * vm_area_map_pages - map pages inside given sparse vm_area |
| * @area: vm_area |
| * @start: start address inside vm_area |
| * @end: end address inside vm_area |
| * @pages: pages to map (always PAGE_SIZE pages) |
| */ |
| int vm_area_map_pages(struct vm_struct *area, unsigned long start, |
| unsigned long end, struct page **pages) |
| { |
| int err; |
| |
| err = check_sparse_vm_area(area, start, end); |
| if (err) |
| return err; |
| |
| return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT); |
| } |
| |
| /** |
| * vm_area_unmap_pages - unmap pages inside given sparse vm_area |
| * @area: vm_area |
| * @start: start address inside vm_area |
| * @end: end address inside vm_area |
| */ |
| void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, |
| unsigned long end) |
| { |
| if (check_sparse_vm_area(area, start, end)) |
| return; |
| |
| vunmap_range(start, end); |
| } |
| |
| int is_vmalloc_or_module_addr(const void *x) |
| { |
| /* |
| * ARM, x86-64 and sparc64 put modules in a special place, |
| * and fall back on vmalloc() if that fails. Others |
| * just put it in the vmalloc space. |
| */ |
| #if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR) |
| unsigned long addr = (unsigned long)kasan_reset_tag(x); |
| if (addr >= MODULES_VADDR && addr < MODULES_END) |
| return 1; |
| #endif |
| return is_vmalloc_addr(x); |
| } |
| EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr); |
| |
| /* |
| * Walk a vmap address to the struct page it maps. Huge vmap mappings will |
| * return the tail page that corresponds to the base page address, which |
| * matches small vmap mappings. |
| */ |
| struct page *vmalloc_to_page(const void *vmalloc_addr) |
| { |
| unsigned long addr = (unsigned long) vmalloc_addr; |
| struct page *page = NULL; |
| pgd_t *pgd = pgd_offset_k(addr); |
| p4d_t *p4d; |
| pud_t *pud; |
| pmd_t *pmd; |
| pte_t *ptep, pte; |
| |
| /* |
| * XXX we might need to change this if we add VIRTUAL_BUG_ON for |
| * architectures that do not vmalloc module space |
| */ |
| VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); |
| |
| if (pgd_none(*pgd)) |
| return NULL; |
| if (WARN_ON_ONCE(pgd_leaf(*pgd))) |
| return NULL; /* XXX: no allowance for huge pgd */ |
| if (WARN_ON_ONCE(pgd_bad(*pgd))) |
| return NULL; |
| |
| p4d = p4d_offset(pgd, addr); |
| if (p4d_none(*p4d)) |
| return NULL; |
| if (p4d_leaf(*p4d)) |
| return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); |
| if (WARN_ON_ONCE(p4d_bad(*p4d))) |
| return NULL; |
| |
| pud = pud_offset(p4d, addr); |
| if (pud_none(*pud)) |
| return NULL; |
| if (pud_leaf(*pud)) |
| return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
| if (WARN_ON_ONCE(pud_bad(*pud))) |
| return NULL; |
| |
| pmd = pmd_offset(pud, addr); |
| if (pmd_none(*pmd)) |
| return NULL; |
| if (pmd_leaf(*pmd)) |
| return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
| if (WARN_ON_ONCE(pmd_bad(*pmd))) |
| return NULL; |
| |
| ptep = pte_offset_kernel(pmd, addr); |
| pte = ptep_get(ptep); |
| if (pte_present(pte)) |
| page = pte_page(pte); |
| |
| return page; |
| } |
| EXPORT_SYMBOL(vmalloc_to_page); |
| |
| /* |
| * Map a vmalloc()-space virtual address to the physical page frame number. |
| */ |
| unsigned long vmalloc_to_pfn(const void *vmalloc_addr) |
| { |
| return page_to_pfn(vmalloc_to_page(vmalloc_addr)); |
| } |
| EXPORT_SYMBOL(vmalloc_to_pfn); |
| |
| |
| /*** Global kva allocator ***/ |
| |
| #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 |
| #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 |
| |
| |
| static DEFINE_SPINLOCK(free_vmap_area_lock); |
| static bool vmap_initialized __read_mostly; |
| |
| /* |
| * vmap_area objects are allocated from this dedicated kmem_cache. |
| * Reusing cached objects keeps allocations fast, especially for the |
| * "no edge" splitting of a free block. |
| */ |
| static struct kmem_cache *vmap_area_cachep; |
| |
| /* |
| * This linked list is used together with free_vmap_area_root. |
| * It gives O(1) access to prev/next entries to perform fast coalescing. |
| */ |
| static LIST_HEAD(free_vmap_area_list); |
| |
| /* |
| * This augmented red-black tree represents the free vmap space. |
| * All vmap_area objects in this tree are sorted by va->va_start |
| * address. It is used for allocation and for merging when a vmap |
| * object is released. |
| * |
| * Each vmap_area node stores the maximum free-block size available |
| * in its sub-tree (left or right). This makes it possible to find |
| * the lowest-address free area that satisfies a request. |
| */ |
| static struct rb_root free_vmap_area_root = RB_ROOT; |
| |
| /* |
| * Preload a CPU with one object for the "no edge" split case. The |
| * aim is to avoid allocations from atomic context, so that more |
| * permissive allocation masks can be used. |
| */ |
| static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node); |
| |
| /* |
| * This structure defines a single, solid model where a list and an |
| * rb-tree are part of one entity protected by the lock. Nodes are |
| * sorted in ascending order, and the list provides O(1) access to |
| * left/right neighbours as well as sequential traversal. |
| */ |
| struct rb_list { |
| struct rb_root root; |
| struct list_head head; |
| spinlock_t lock; |
| }; |
| |
| /* |
| * A fast size-segregated storage contains VAs up to 1M in size. Each |
| * pool is a list of ready-to-go VAs of one particular size; the pool |
| * at index i holds VAs of (i + 1) pages. |
| */ |
| #define MAX_VA_SIZE_PAGES 256 |
| |
| struct vmap_pool { |
| struct list_head head; |
| unsigned long len; |
| }; |
| |
| /* |
| * Per-node vmap logic. Users work with nodes instead of a single |
| * global heap, which balances access and mitigates lock contention. |
| */ |
| static struct vmap_node { |
| /* Simple size segregated storage. */ |
| struct vmap_pool pool[MAX_VA_SIZE_PAGES]; |
| spinlock_t pool_lock; |
| bool skip_populate; |
| |
| /* Bookkeeping data of this node. */ |
| struct rb_list busy; |
| struct rb_list lazy; |
| |
| /* |
| * Ready-to-free areas. |
| */ |
| struct list_head purge_list; |
| struct work_struct purge_work; |
| unsigned long nr_purged; |
| } single; |
| |
| /* |
| * The initial setup consists of one single node, i.e. balancing |
| * is fully disabled. Later on, after vmap is initialized, these |
| * parameters are updated based on the system capacity. |
| */ |
| static struct vmap_node *vmap_nodes = &single; |
| static __read_mostly unsigned int nr_vmap_nodes = 1; |
| static __read_mostly unsigned int vmap_zone_size = 1; |
| |
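| /* |
| * The address space is split into zones of vmap_zone_size bytes that |
| * are striped round-robin across the vmap nodes. |
| */ |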
| static inline unsigned int |
| addr_to_node_id(unsigned long addr) |
| { |
| return (addr / vmap_zone_size) % nr_vmap_nodes; |
| } |
| |
| static inline struct vmap_node * |
| addr_to_node(unsigned long addr) |
| { |
| return &vmap_nodes[addr_to_node_id(addr)]; |
| } |
| |
| static inline struct vmap_node * |
| id_to_node(unsigned int id) |
| { |
| return &vmap_nodes[id % nr_vmap_nodes]; |
| } |
| |
| /* |
| * We use the value 0 to represent "no node", which is why an encoded |
| * value is the node id incremented by 1 and shifted left by |
| * BITS_PER_BYTE, leaving the low byte free for the va_flags bits it is |
| * combined with. It is always greater than 0. A node_id that can be |
| * encoded lies in [0:nr_vmap_nodes - 1]. If the passed node_id is not |
| * valid, 0 is returned. |
| */ |
| static unsigned int |
| encode_vn_id(unsigned int node_id) |
| { |
| /* Can store U8_MAX [0:254] nodes. */ |
| if (node_id < nr_vmap_nodes) |
| return (node_id + 1) << BITS_PER_BYTE; |
| |
| /* Warn and no node encoded. */ |
| WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id); |
| return 0; |
| } |
| |
| /* |
| * Returns the decoded node-id, which lies within [0:nr_vmap_nodes-1]. |
| * If the extracted data is invalid, nr_vmap_nodes is returned. |
| */ |
| static unsigned int |
| decode_vn_id(unsigned int val) |
| { |
| unsigned int node_id = (val >> BITS_PER_BYTE) - 1; |
| |
| /* Can store U8_MAX [0:254] nodes. */ |
| if (node_id < nr_vmap_nodes) |
| return node_id; |
| |
| /* If it was _not_ zero, warn. */ |
| WARN_ONCE(node_id != UINT_MAX, |
| "Decode wrong node id (%d)\n", node_id); |
| |
| return nr_vmap_nodes; |
| } |
| |
| static bool |
| is_vn_id_valid(unsigned int node_id) |
| { |
| if (node_id < nr_vmap_nodes) |
| return true; |
| |
| return false; |
| } |
| |
| static __always_inline unsigned long |
| va_size(struct vmap_area *va) |
| { |
| return (va->va_end - va->va_start); |
| } |
| |
| static __always_inline unsigned long |
| get_subtree_max_size(struct rb_node *node) |
| { |
| struct vmap_area *va; |
| |
| va = rb_entry_safe(node, struct vmap_area, rb_node); |
| return va ? va->subtree_max_size : 0; |
| } |
| |
| RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, |
| struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) |
| |
| static void reclaim_and_purge_vmap_areas(void); |
| static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); |
| static void drain_vmap_area_work(struct work_struct *work); |
| static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); |
| |
| static atomic_long_t nr_vmalloc_pages; |
| |
| unsigned long vmalloc_nr_pages(void) |
| { |
| return atomic_long_read(&nr_vmalloc_pages); |
| } |
| |
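| /* Look up the vmap_area that contains @addr in the given rb-tree, or NULL. */ |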
| static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) |
| { |
| struct rb_node *n = root->rb_node; |
| |
| addr = (unsigned long)kasan_reset_tag((void *)addr); |
| |
| while (n) { |
| struct vmap_area *va; |
| |
| va = rb_entry(n, struct vmap_area, rb_node); |
| if (addr < va->va_start) |
| n = n->rb_left; |
| else if (addr >= va->va_end) |
| n = n->rb_right; |
| else |
| return va; |
| } |
| |
| return NULL; |
| } |
| |
| /* Look up the first VA which satisfies addr < va_end, NULL if none. */ |
| static struct vmap_area * |
| __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root) |
| { |
| struct vmap_area *va = NULL; |
| struct rb_node *n = root->rb_node; |
| |
| addr = (unsigned long)kasan_reset_tag((void *)addr); |
| |
| while (n) { |
| struct vmap_area *tmp; |
| |
| tmp = rb_entry(n, struct vmap_area, rb_node); |
| if (tmp->va_end > addr) { |
| va = tmp; |
| if (tmp->va_start <= addr) |
| break; |
| |
| n = n->rb_left; |
| } else |
| n = n->rb_right; |
| } |
| |
| return va; |
| } |
| |
| /* |
| * Returns the node where the first VA that satisfies addr < va_end resides. |
| * On success the node is locked; the caller is responsible for unlocking it |
| * once the VA no longer needs to be accessed. |
| * |
| * Returns NULL if nothing is found. |
| */ |
| static struct vmap_node * |
| find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) |
| { |
| unsigned long va_start_lowest; |
| struct vmap_node *vn; |
| int i; |
| |
| repeat: |
| for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) { |
| vn = &vmap_nodes[i]; |
| |
| spin_lock(&vn->busy.lock); |
| *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); |
| |
| if (*va) |
| if (!va_start_lowest || (*va)->va_start < va_start_lowest) |
| va_start_lowest = (*va)->va_start; |
| spin_unlock(&vn->busy.lock); |
| } |
| |
| /* |
| * Check that the found VA still exists, it might have gone away. If it |
| * has been removed concurrently, repeat the search and proceed to the |
| * next one, which is a rare case. |
| */ |
| if (va_start_lowest) { |
| vn = addr_to_node(va_start_lowest); |
| |
| spin_lock(&vn->busy.lock); |
| *va = __find_vmap_area(va_start_lowest, &vn->busy.root); |
| |
| if (*va) |
| return vn; |
| |
| spin_unlock(&vn->busy.lock); |
| goto repeat; |
| } |
| |
| return NULL; |
| } |
| |
| /* |
| * This function returns the address of the parent node and of its |
| * left or right link for further processing. |
| * |
| * Otherwise NULL is returned. In that case inserting the conflicting, |
| * overlapping range must be declined and is considered a bug. |
| */ |
| static __always_inline struct rb_node ** |
| find_va_links(struct vmap_area *va, |
| struct rb_root *root, struct rb_node *from, |
| struct rb_node **parent) |
| { |
| struct vmap_area *tmp_va; |
| struct rb_node **link; |
| |
| if (root) { |
| link = &root->rb_node; |
| if (unlikely(!*link)) { |
| *parent = NULL; |
| return link; |
| } |
| } else { |
| link = &from; |
| } |
| |
| /* |
| * Go to the bottom of the tree. When we hit the last point we end up |
| * with the parent rb_node and the correct direction ("link"), where |
| * the new va->rb_node will be attached. |
| */ |
| do { |
| tmp_va = rb_entry(*link, struct vmap_area, rb_node); |
| |
| /* |
| * During the traversal we also do a sanity check: warn and bail |
| * out if the new range overlaps an existing one, either partially |
| * (left/right) or fully. |
| */ |
| if (va->va_end <= tmp_va->va_start) |
| link = &(*link)->rb_left; |
| else if (va->va_start >= tmp_va->va_end) |
| link = &(*link)->rb_right; |
| else { |
| WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", |
| va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); |
| |
| return NULL; |
| } |
| } while (*link); |
| |
| *parent = &tmp_va->rb_node; |
| return link; |
| } |
| |
| static __always_inline struct list_head * |
| get_va_next_sibling(struct rb_node *parent, struct rb_node **link) |
| { |
| struct list_head *list; |
| |
| if (unlikely(!parent)) |
| /* |
| * The red-black tree where we try to find VA neighbours before |
| * merging or inserting is empty, i.e. there is no free vmap |
| * space. This does not normally happen, but we handle the case |
| * anyway. |
| */ |
| return NULL; |
| |
| list = &rb_entry(parent, struct vmap_area, rb_node)->list; |
| return (&parent->rb_right == link ? list->next : list); |
| } |
| |
| static __always_inline void |
| __link_va(struct vmap_area *va, struct rb_root *root, |
| struct rb_node *parent, struct rb_node **link, |
| struct list_head *head, bool augment) |
| { |
| /* |
| * VA is still not in the list, but we can |
| * identify its future previous list_head node. |
| */ |
| if (likely(parent)) { |
| head = &rb_entry(parent, struct vmap_area, rb_node)->list; |
| if (&parent->rb_right != link) |
| head = head->prev; |
| } |
| |
| /* Insert to the rb-tree */ |
| rb_link_node(&va->rb_node, parent, link); |
| if (augment) { |
| /* |
| * Perform a simple insertion into the tree. We do not set |
| * va->subtree_max_size to its current size before calling |
| * rb_insert_augmented(), because the tree is populated from the |
| * bottom towards the parent levels only when the node is already |
| * in the tree. |
| * |
| * Therefore subtree_max_size is set to zero after insertion, and |
| * augment_tree_propagate_from() puts everything into the correct |
| * order later on. |
| */ |
| rb_insert_augmented(&va->rb_node, |
| root, &free_vmap_area_rb_augment_cb); |
| va->subtree_max_size = 0; |
| } else { |
| rb_insert_color(&va->rb_node, root); |
| } |
| |
| /* Address-sort this list */ |
| list_add(&va->list, head); |
| } |
| |
| static __always_inline void |
| link_va(struct vmap_area *va, struct rb_root *root, |
| struct rb_node *parent, struct rb_node **link, |
| struct list_head *head) |
| { |
| __link_va(va, root, parent, link, head, false); |
| } |
| |
| static __always_inline void |
| link_va_augment(struct vmap_area *va, struct rb_root *root, |
| struct rb_node *parent, struct rb_node **link, |
| struct list_head *head) |
| { |
| __link_va(va, root, parent, link, head, true); |
| } |
| |
| static __always_inline void |
| __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) |
| { |
| if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) |
| return; |
| |
| if (augment) |
| rb_erase_augmented(&va->rb_node, |
| root, &free_vmap_area_rb_augment_cb); |
| else |
| rb_erase(&va->rb_node, root); |
| |
| list_del_init(&va->list); |
| RB_CLEAR_NODE(&va->rb_node); |
| } |
| |
| static __always_inline void |
| unlink_va(struct vmap_area *va, struct rb_root *root) |
| { |
| __unlink_va(va, root, false); |
| } |
| |
| static __always_inline void |
| unlink_va_augment(struct vmap_area *va, struct rb_root *root) |
| { |
| __unlink_va(va, root, true); |
| } |
| |
| #if DEBUG_AUGMENT_PROPAGATE_CHECK |
| /* |
| * Gets called when removing a node and rotating. |
| */ |
| static __always_inline unsigned long |
| compute_subtree_max_size(struct vmap_area *va) |
| { |
| return max3(va_size(va), |
| get_subtree_max_size(va->rb_node.rb_left), |
| get_subtree_max_size(va->rb_node.rb_right)); |
| } |
| |
| static void |
| augment_tree_propagate_check(void) |
| { |
| struct vmap_area *va; |
| unsigned long computed_size; |
| |
| list_for_each_entry(va, &free_vmap_area_list, list) { |
| computed_size = compute_subtree_max_size(va); |
| if (computed_size != va->subtree_max_size) |
| pr_emerg("tree is corrupted: %lu, %lu\n", |
| va_size(va), va->subtree_max_size); |
| } |
| } |
| #endif |
| |
| /* |
| * This function populates subtree_max_size from the bottom towards |
| * the upper levels, starting from VA. The propagation must be done |
| * whenever the VA size is modified by changing its va_start/va_end, |
| * or when a new VA is inserted into the tree. |
| * |
| * It means that augment_tree_propagate_from() must be called: |
| * - after a VA has been inserted into the tree (free path); |
| * - after a VA has been shrunk (allocation path); |
| * - after a VA has been enlarged (merging path). |
| * |
| * Please note that this does not mean that parent nodes and their |
| * subtree_max_size are recalculated all the way up to the root node. |
| * |
| * 4--8 |
| * /\ |
| * / \ |
| * / \ |
| * 2--2 8--8 |
| * |
| * For example, if we modify node 4, shrinking it to 2, no modification |
| * is required at all. If we shrink node 2 to 1, only its |
| * subtree_max_size is updated and set to 1. If we shrink node 8 to 6, |
| * its subtree_max_size is set to 6 and the parent node becomes 4--6. |
| */ |
| static __always_inline void |
| augment_tree_propagate_from(struct vmap_area *va) |
| { |
| /* |
| * Populate the tree from bottom towards the root until |
| * the calculated maximum available size of checked node |
| * is equal to its current one. |
| */ |
| free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); |
| |
| #if DEBUG_AUGMENT_PROPAGATE_CHECK |
| augment_tree_propagate_check(); |
| #endif |
| } |
| |
| static void |
| insert_vmap_area(struct vmap_area *va, |
| struct rb_root *root, struct list_head *head) |
| { |
| struct rb_node **link; |
| struct rb_node *parent; |
| |
| link = find_va_links(va, root, NULL, &parent); |
| if (link) |
| link_va(va, root, parent, link, head); |
| } |
| |
| static void |
| insert_vmap_area_augment(struct vmap_area *va, |
| struct rb_node *from, struct rb_root *root, |
| struct list_head *head) |
| { |
| struct rb_node **link; |
| struct rb_node *parent; |
| |
| if (from) |
| link = find_va_links(va, NULL, from, &parent); |
| else |
| link = find_va_links(va, root, NULL, &parent); |
| |
| if (link) { |
| link_va_augment(va, root, parent, link, head); |
| augment_tree_propagate_from(va); |
| } |
| } |
| |
| /* |
| * Merge a freed chunk of VA with the previous and next free blocks. |
| * If no coalescing is possible, a new free area is inserted instead. |
| * If the VA has been merged, it is freed. |
| * |
| * Please note that NULL can be returned for overlapping ranges, |
| * accompanied by a WARN() report. Although this is buggy behaviour, |
| * the system can stay alive and keep going. |
| */ |
| static __always_inline struct vmap_area * |
| __merge_or_add_vmap_area(struct vmap_area *va, |
| struct rb_root *root, struct list_head *head, bool augment) |
| { |
| struct vmap_area *sibling; |
| struct list_head *next; |
| struct rb_node **link; |
| struct rb_node *parent; |
| bool merged = false; |
| |
| /* |
| * Find a place in the tree where VA potentially will be |
| * inserted, unless it is merged with its sibling/siblings. |
| */ |
| link = find_va_links(va, root, NULL, &parent); |
| if (!link) |
| return NULL; |
| |
| /* |
| * Get next node of VA to check if merging can be done. |
| */ |
| next = get_va_next_sibling(parent, link); |
| if (unlikely(next == NULL)) |
| goto insert; |
| |
| /* |
| * start end |
| * | | |
| * |<------VA------>|<-----Next----->| |
| * | | |
| * start end |
| */ |
| if (next != head) { |
| sibling = list_entry(next, struct vmap_area, list); |
| if (sibling->va_start == va->va_end) { |
| sibling->va_start = va->va_start; |
| |
| /* Free vmap_area object. */ |
| kmem_cache_free(vmap_area_cachep, va); |
| |
| /* Point to the new merged area. */ |
| va = sibling; |
| merged = true; |
| } |
| } |
| |
| /* |
| * start end |
| * | | |
| * |<-----Prev----->|<------VA------>| |
| * | | |
| * start end |
| */ |
| if (next->prev != head) { |
| sibling = list_entry(next->prev, struct vmap_area, list); |
| if (sibling->va_end == va->va_start) { |
| /* |
| * If both neighbors are coalesced, it is important |
| * to unlink the "next" node first, followed by merging |
| * with "previous" one. Otherwise the tree might not be |
| * fully populated if a sibling's augmented value is |
| * "normalized" because of rotation operations. |
| */ |
| if (merged) |
| __unlink_va(va, root, augment); |
| |
| sibling->va_end = va->va_end; |
| |
| /* Free vmap_area object. */ |
| kmem_cache_free(vmap_area_cachep, va); |
| |
| /* Point to the new merged area. */ |
| va = sibling; |
| merged = true; |
| } |
| } |
| |
| insert: |
| if (!merged) |
| __link_va(va, root, parent, link, head, augment); |
| |
| return va; |
| } |
| |
| static __always_inline struct vmap_area * |
| merge_or_add_vmap_area(struct vmap_area *va, |
| struct rb_root *root, struct list_head *head) |
| { |
| return __merge_or_add_vmap_area(va, root, head, false); |
| } |
| |
| static __always_inline struct vmap_area * |
| merge_or_add_vmap_area_augment(struct vmap_area *va, |
| struct rb_root *root, struct list_head *head) |
| { |
| va = __merge_or_add_vmap_area(va, root, head, true); |
| if (va) |
| augment_tree_propagate_from(va); |
| |
| return va; |
| } |
| |
| static __always_inline bool |
| is_within_this_va(struct vmap_area *va, unsigned long size, |
| unsigned long align, unsigned long vstart) |
| { |
| unsigned long nva_start_addr; |
| |
| if (va->va_start > vstart) |
| nva_start_addr = ALIGN(va->va_start, align); |
| else |
| nva_start_addr = ALIGN(vstart, align); |
| |
| /* Can be overflowed due to big size or alignment. */ |
| if (nva_start_addr + size < nva_start_addr || |
| nva_start_addr < vstart) |
| return false; |
| |
| return (nva_start_addr + size <= va->va_end); |
| } |
| |
| /* |
| * Find the first free block (lowest start address) in the tree that |
| * satisfies the request described by the passed parameters. Please note |
| * that with an alignment bigger than PAGE_SIZE, the search length is |
| * adjusted to account for the worst-case alignment overhead. |
| */ |
| static __always_inline struct vmap_area * |
| find_vmap_lowest_match(struct rb_root *root, unsigned long size, |
| unsigned long align, unsigned long vstart, bool adjust_search_size) |
| { |
| struct vmap_area *va; |
| struct rb_node *node; |
| unsigned long length; |
| |
| /* Start from the root. */ |
| node = root->rb_node; |
| |
| /* Adjust the search size for alignment overhead. */ |
| length = adjust_search_size ? size + align - 1 : size; |
| |
| while (node) { |
| va = rb_entry(node, struct vmap_area, rb_node); |
| |
| if (get_subtree_max_size(node->rb_left) >= length && |
| vstart < va->va_start) { |
| node = node->rb_left; |
| } else { |
| if (is_within_this_va(va, size, align, vstart)) |
| return va; |
| |
| /* |
| * It does not make sense to go deeper into the right |
| * sub-tree if it does not have a free block that is |
| * equal to or bigger than the requested search length. |
| */ |
| if (get_subtree_max_size(node->rb_right) >= length) { |
| node = node->rb_right; |
| continue; |
| } |
| |
| /* |
| * OK. We roll back and find the first right sub-tree |
| * that satisfies the search criteria. This can happen |
| * due to the "vstart" restriction or an alignment overhead |
| * that is bigger than PAGE_SIZE. |
| */ |
| while ((node = rb_parent(node))) { |
| va = rb_entry(node, struct vmap_area, rb_node); |
| if (is_within_this_va(va, size, align, vstart)) |
| return va; |
| |
| if (get_subtree_max_size(node->rb_right) >= length && |
| vstart <= va->va_start) { |
| /* |
| * Shift vstart forward. Please note that we update it to the |
| * parent's start address plus "1", because we do not want to enter |
| * the same sub-tree again after it has already been checked and no |
| * suitable free block was found there. |
| */ |
| vstart = va->va_start + 1; |
| node = node->rb_right; |
| break; |
| } |
| } |
| } |
| } |
| |
| return NULL; |
| } |
| |
| #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK |
| #include <linux/random.h> |
| |
| static struct vmap_area * |
| find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, |
| unsigned long align, unsigned long vstart) |
| { |
| struct vmap_area *va; |
| |
| list_for_each_entry(va, head, list) { |
| if (!is_within_this_va(va, size, align, vstart)) |
| continue; |
| |
| return va; |
| } |
| |
| return NULL; |
| } |
| |
| static void |
| find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, |
| unsigned long size, unsigned long align) |
| { |
| struct vmap_area *va_1, *va_2; |
| unsigned long vstart; |
| unsigned int rnd; |
| |
| get_random_bytes(&rnd, sizeof(rnd)); |
| vstart = VMALLOC_START + rnd; |
| |
| va_1 = find_vmap_lowest_match(root, size, align, vstart, false); |
| va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); |
| |
| if (va_1 != va_2) |
| pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", |
| va_1, va_2, vstart); |
| } |
| #endif |
| |
| enum fit_type { |
| NOTHING_FIT = 0, |
| FL_FIT_TYPE = 1, /* full fit */ |
| LE_FIT_TYPE = 2, /* left edge fit */ |
| RE_FIT_TYPE = 3, /* right edge fit */ |
| NE_FIT_TYPE = 4 /* no edge fit */ |
| }; |
| |
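| /* |
| * Classify how the requested block [nva_start_addr, nva_start_addr + size) |
| * fits into the free area "va": completely (FL), flush with its left edge |
| * (LE), flush with its right edge (RE), or strictly inside it (NE). |
| */ |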
| static __always_inline enum fit_type |
| classify_va_fit_type(struct vmap_area *va, |
| unsigned long nva_start_addr, unsigned long size) |
| { |
| enum fit_type type; |
| |
| /* Check if it is within VA. */ |
| if (nva_start_addr < va->va_start || |
| nva_start_addr + size > va->va_end) |
| return NOTHING_FIT; |
| |
| /* Now classify. */ |
| if (va->va_start == nva_start_addr) { |
| if (va->va_end == nva_start_addr + size) |
| type = FL_FIT_TYPE; |
| else |
| type = LE_FIT_TYPE; |
| } else if (va->va_end == nva_start_addr + size) { |
| type = RE_FIT_TYPE; |
| } else { |
| type = NE_FIT_TYPE; |
| } |
| |
| return type; |
| } |
| |
| static __always_inline int |
| va_clip(struct rb_root *root, struct list_head *head, |
| struct vmap_area *va, unsigned long nva_start_addr, |
| unsigned long size) |
| { |
| struct vmap_area *lva = NULL; |
| enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); |
| |
| if (type == FL_FIT_TYPE) { |
| /* |
| * No need to split VA, it fully fits. |
| * |
| * | | |
| * V NVA V |
| * |---------------| |
| */ |
| unlink_va_augment(va, root); |
| kmem_cache_free(vmap_area_cachep, va); |
| } else if (type == LE_FIT_TYPE) { |
| /* |
| * Split left edge of fit VA. |
| * |
| * | | |
| * V NVA V R |
| * |-------|-------| |
| */ |
| va->va_start += size; |
| } else if (type == RE_FIT_TYPE) { |
| /* |
| * Split right edge of fit VA. |
| * |
| * | | |
| * L V NVA V |
| * |-------|-------| |
| */ |
| va->va_end = nva_start_addr; |
| } else if (type == NE_FIT_TYPE) { |
| /* |
| * Split no edge of fit VA. |
| * |
| * | | |
| * L V NVA V R |
| * |---|-------|---| |
| */ |
| lva = __this_cpu_xchg(ne_fit_preload_node, NULL); |
| if (unlikely(!lva)) { |
| /* |
| * For the percpu allocator we do not do any pre-allocation |
| * and leave things as they are. The reason is that it almost |
| * never ends up with an NE_FIT_TYPE split: percpu offsets and |
| * sizes are aligned to a fixed alignment request, so |
| * RE_FIT_TYPE and FL_FIT_TYPE are its main fitting cases. |
| * |
| * There are a few exceptions though, for example the first |
| * allocation (early boot) when we have "one" big free space |
| * that has to be split. |
| * |
| * We can also hit this path for regular "vmap" allocations if |
| * the current CPU was not preloaded; see the comment in |
| * alloc_vmap_area() for why. In that case GFP_NOWAIT is used |
| * instead to get an extra object for the split. This is rare |
| * and usually does not occur. |
| * |
| * If the allocation fails, the "overflow" path is triggered to |
| * purge lazily freed areas and free some memory, and then the |
| * "retry" path repeats the attempt one more time. See |
| * alloc_vmap_area() for more details. |
| */ |
| lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); |
| if (!lva) |
| return -1; |
| } |
| |
| /* |
| * Build the remainder. |
| */ |
| lva->va_start = va->va_start; |
| lva->va_end = nva_start_addr; |
| |
| /* |
| * Shrink this VA to remaining size. |
| */ |
| va->va_start = nva_start_addr + size; |
| } else { |
| return -1; |
| } |
| |
| if (type != FL_FIT_TYPE) { |
| augment_tree_propagate_from(va); |
| |
| if (lva) /* type == NE_FIT_TYPE */ |
| insert_vmap_area_augment(lva, &va->rb_node, root, head); |
| } |
| |
| return 0; |
| } |
| |
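| /* |
| * Carve "size" bytes out of the free area "va": compute the aligned start |
| * address, check it against "vend" and clip the free area accordingly. |
| * Returns the start address on success, or "vend" to signal failure. |
| */ |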
| static unsigned long |
| va_alloc(struct vmap_area *va, |
| struct rb_root *root, struct list_head *head, |
| unsigned long size, unsigned long align, |
| unsigned long vstart, unsigned long vend) |
| { |
| unsigned long nva_start_addr; |
| int ret; |
| |
| if (va->va_start > vstart) |
| nva_start_addr = ALIGN(va->va_start, align); |
| else |
| nva_start_addr = ALIGN(vstart, align); |
| |
| /* Check the "vend" restriction. */ |
| if (nva_start_addr + size > vend) |
| return vend; |
| |
| /* Update the free vmap_area. */ |
| ret = va_clip(root, head, va, nva_start_addr, size); |
| if (WARN_ON_ONCE(ret)) |
| return vend; |
| |
| return nva_start_addr; |
| } |
| |
| /* |
| * Returns the start address of the newly allocated area on success. |
| * Otherwise "vend" is returned to indicate failure. |
| */ |
| static __always_inline unsigned long |
| __alloc_vmap_area(struct rb_root *root, struct list_head *head, |
| unsigned long size, unsigned long align, |
| unsigned long vstart, unsigned long vend) |
| { |
| bool adjust_search_size = true; |
| unsigned long nva_start_addr; |
| struct vmap_area *va; |
| |
| /* |
| * Do not adjust when: |
| * a) align <= PAGE_SIZE, because it does not make any sense: |
| * all blocks (their start addresses) are at least PAGE_SIZE |
| * aligned anyway; |
| * b) a short range where the requested size corresponds exactly to the |
| * specified [vstart:vend] interval and the alignment is > PAGE_SIZE. |
| * With an adjusted search length the allocation would not succeed. |
| */ |
| if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) |
| adjust_search_size = false; |
| |
| va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); |
| if (unlikely(!va)) |
| return vend; |
| |
| nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); |
| if (nva_start_addr == vend) |
| return vend; |
| |
| #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK |
| find_vmap_lowest_match_check(root, head, size, align); |
| #endif |
| |
| return nva_start_addr; |
| } |
| |
| /* |
| * Free a region of KVA allocated by alloc_vmap_area |
| */ |
| static void free_vmap_area(struct vmap_area *va) |
| { |
| struct vmap_node *vn = addr_to_node(va->va_start); |
| |
| /* |
| * Remove from the busy tree/list. |
| */ |
| spin_lock(&vn->busy.lock); |
| unlink_va(va, &vn->busy.root); |
| spin_unlock(&vn->busy.lock); |
| |
| /* |
| * Insert/Merge it back to the free tree/list. |
| */ |
| spin_lock(&free_vmap_area_lock); |
| merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); |
| spin_unlock(&free_vmap_area_lock); |
| } |
| |
| static inline void |
| preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) |
| { |
| struct vmap_area *va = NULL, *tmp; |
| |
| /* |
| * Preload this CPU with one extra vmap_area object. It is used |
| * when the fit type of a free area is NE_FIT_TYPE, and it guarantees |
| * that the CPU doing the allocation is preloaded. |
| * |
| * We do it in a non-atomic context, which allows us to use more |
| * permissive allocation masks and thus be more stable under low-memory |
| * conditions and high memory pressure. |
| */ |
| if (!this_cpu_read(ne_fit_preload_node)) |
| va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); |
| |
| spin_lock(lock); |
| |
| tmp = NULL; |
| if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) |
| kmem_cache_free(vmap_area_cachep, va); |
| } |
| |
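| /* |
| * Map an allocation size to a per-node pool: pool[i] caches VAs of |
| * (i + 1) pages. Sizes above MAX_VA_SIZE_PAGES pages are not cached. |
| */ |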
| static struct vmap_pool * |
| size_to_va_pool(struct vmap_node *vn, unsigned long size) |
| { |
| unsigned int idx = (size - 1) / PAGE_SIZE; |
| |
| if (idx < MAX_VA_SIZE_PAGES) |
| return &vn->pool[idx]; |
| |
| return NULL; |
| } |
| |
| static bool |
| node_pool_add_va(struct vmap_node *n, struct vmap_area *va) |
| { |
| struct vmap_pool *vp; |
| |
| vp = size_to_va_pool(n, va_size(va)); |
| if (!vp) |
| return false; |
| |
| spin_lock(&n->pool_lock); |
| list_add(&va->list, &vp->head); |
| WRITE_ONCE(vp->len, vp->len + 1); |
| spin_unlock(&n->pool_lock); |
| |
| return true; |
| } |
| |
| static struct vmap_area * |
| node_pool_del_va(struct vmap_node *vn, unsigned long size, |
| unsigned long align, unsigned long vstart, |
| unsigned long vend) |
| { |
| struct vmap_area *va = NULL; |
| struct vmap_pool *vp; |
| int err = 0; |
| |
| vp = size_to_va_pool(vn, size); |
| if (!vp || list_empty(&vp->head)) |
| return NULL; |
| |
| spin_lock(&vn->pool_lock); |
| if (!list_empty(&vp->head)) { |
| va = list_first_entry(&vp->head, struct vmap_area, list); |
| |
| if (IS_ALIGNED(va->va_start, align)) { |
| /* |
| * Do some sanity checks and emit a warning |
| * if any of the checks below detects an error. |
| */ |
| err |= (va_size(va) != size); |
| err |= (va->va_start < vstart); |
| err |= (va->va_end > vend); |
| |
| if (!WARN_ON_ONCE(err)) { |
| list_del_init(&va->list); |
| WRITE_ONCE(vp->len, vp->len - 1); |
| } else { |
| va = NULL; |
| } |
| } else { |
| list_move_tail(&va->list, &vp->head); |
| va = NULL; |
| } |
| } |
| spin_unlock(&vn->pool_lock); |
| |
| return va; |
| } |
| |
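| /* |
| * Try to satisfy an allocation from this CPU's node pool. On success a |
| * cached VA is returned and *addr is set to its start; otherwise NULL is |
| * returned and *addr is set to "vend" so that the caller falls back to |
| * the global free space. *vn_id carries the encoded node id the VA must |
| * be returned to when it is freed. |
| */ |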
| static struct vmap_area * |
| node_alloc(unsigned long size, unsigned long align, |
| unsigned long vstart, unsigned long vend, |
| unsigned long *addr, unsigned int *vn_id) |
| { |
| struct vmap_area *va; |
| |
| *vn_id = 0; |
| *addr = vend; |
| |
| /* |
| * Fall back to the global heap if this is not a vmalloc-range |
| * request or there is only one node. |
| */ |
| if (vstart != VMALLOC_START || vend != VMALLOC_END || |
| nr_vmap_nodes == 1) |
| return NULL; |
| |
| *vn_id = raw_smp_processor_id() % nr_vmap_nodes; |
| va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); |
| *vn_id = encode_vn_id(*vn_id); |
| |
| if (va) |
| *addr = va->va_start; |
| |
| return va; |
| } |
| |
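| /* Bind a vm_struct to a vmap_area and record its flags and caller. */ |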
| static inline void setup_vmalloc_vm(struct vm_struct *vm, |
| struct vmap_area *va, unsigned long flags, const void *caller) |
| { |
| vm->flags = flags; |
| vm->addr = (void *)va->va_start; |
| vm->size = va->va_end - va->va_start; |
| vm->caller = caller; |
| va->vm = vm; |
| } |
| |
| /* |
| * Allocate a region of KVA of the specified size and alignment, within the |
| * [vstart:vend] range. If vm is passed in, the two will also be bound together. |
| */ |
| static struct vmap_area *alloc_vmap_area(unsigned long size, |
| unsigned long align, |
| unsigned long vstart, unsigned long vend, |
| int node, gfp_t gfp_mask, |
| unsigned long va_flags, struct vm_struct *vm) |
| { |
| struct vmap_node *vn; |
| struct vmap_area *va; |
| unsigned long freed; |
| unsigned long addr; |
| unsigned int vn_id; |
| int purged = 0; |
| int ret; |
| |
| if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) |
| return ERR_PTR(-EINVAL); |
| |
| if (unlikely(!vmap_initialized)) |
| return ERR_PTR(-EBUSY); |
| |
| might_sleep(); |
| |
| /* |
| * Even if the VA ends up being obtained from the global heap (because |
| * this fails), it is still marked with this "vn_id" so that it is |
| * returned to this node's pool later. This makes it possible to |
| * populate the pools based on user demand. |
| * |
| * On success a ready-to-go VA is returned. |
| */ |
| va = node_alloc(size, align, vstart, vend, &addr, &vn_id); |
| if (!va) { |
| gfp_mask = gfp_mask & GFP_RECLAIM_MASK; |
| |
| va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); |
| if (unlikely(!va)) |
| return ERR_PTR(-ENOMEM); |
| |
| /* |
| * Only scan the relevant parts containing pointers to other objects |
| * to avoid false negatives. |
| */ |
| kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); |
| } |
| |
| retry: |
| if (addr == vend) { |
| preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); |
| addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, |
| size, align, vstart, vend); |
| spin_unlock(&free_vmap_area_lock); |
| } |
| |
| trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); |
| |
| /* |
| * If an allocation fails, the "vend" address is |
| * returned. Therefore trigger the overflow path. |
| */ |
| if (unlikely(addr == vend)) |
| goto overflow; |
| |
| va->va_start = addr; |
| va->va_end = addr + size; |
| va->vm = NULL; |
| va->flags = (va_flags | vn_id); |
| |
| if (vm) { |
| vm->addr = (void *)va->va_start; |
| vm->size = va->va_end - va->va_start; |
| va->vm = vm; |
| } |
| |
| vn = addr_to_node(va->va_start); |
| |
| spin_lock(&vn->busy.lock); |
| insert_vmap_area(va, &vn->busy.root, &vn->busy.head); |
| spin_unlock(&vn->busy.lock); |
| |
| BUG_ON(!IS_ALIGNED(va->va_start, align)); |
| BUG_ON(va->va_start < vstart); |
| BUG_ON(va->va_end > vend); |
| |
| ret = kasan_populate_vmalloc(addr, size); |
| if (ret) { |
| free_vmap_area(va); |
| return ERR_PTR(ret); |
| } |
| |
| return va; |
| |
| overflow: |
| if (!purged) { |
| reclaim_and_purge_vmap_areas(); |
| purged = 1; |
| goto retry; |
| } |
| |
| freed = 0; |
| blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); |
| |
| if (freed > 0) { |
| purged = 0; |
| goto retry; |
| } |
| |
| if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) |
| pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n", |
| size, vstart, vend); |
| |
| kmem_cache_free(vmap_area_cachep, va); |
| return ERR_PTR(-EBUSY); |
| } |
| |
| int register_vmap_purge_notifier(struct notifier_block *nb) |
| { |
| return blocking_notifier_chain_register(&vmap_notify_list, nb); |
| } |
| EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); |
| |
| int unregister_vmap_purge_notifier(struct notifier_block *nb) |
| { |
| return blocking_notifier_chain_unregister(&vmap_notify_list, nb); |
| } |
| EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); |
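| |
| /* |
| * Illustrative sketch (not part of this file): a subsystem that caches |
| * vmalloc()'ed buffers can register a purge notifier so its cache is |
| * dropped when KVA allocation hits the overflow path above. The callback |
| * reports the number of freed pages through the unsigned long pointed to |
| * by the last argument; "my_cache_shrink" is a hypothetical helper. |
| * |
| *	static int my_vmap_purge(struct notifier_block *nb, |
| *				 unsigned long event, void *arg) |
| *	{ |
| *		unsigned long *freed = arg; |
| * |
| *		*freed += my_cache_shrink(); |
| *		return NOTIFY_OK; |
| *	} |
| * |
| *	static struct notifier_block my_vmap_nb = { |
| *		.notifier_call = my_vmap_purge, |
| *	}; |
| * |
| *	register_vmap_purge_notifier(&my_vmap_nb); |
| */ |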
| |
| /* |
| * lazy_max_pages is the maximum amount of virtual address space we gather up |
| * before attempting to purge with a TLB flush. |
| * |
| * There is a tradeoff here: a larger number will cover more kernel page tables |
| * and take slightly longer to purge, but it will linearly reduce the number of |
| * global TLB flushes that must be performed. It would seem natural to scale |
| * this number up linearly with the number of CPUs (because vmapping activity |
| * could also scale linearly with the number of CPUs), however it is likely |
| * that in practice, workloads might be constrained in other ways that mean |
| * vmap activity will not scale linearly with CPUs. Also, I want to be |
| * conservative and not introduce a big latency on huge systems, so go with |
| * a less aggressive log scale. It will still be an improvement over the old |
| * code, and it will be simple to change the scale factor if we find that it |
| * becomes a problem on bigger systems. |
| */ |
| static unsigned long lazy_max_pages(void) |
| { |
| unsigned int log; |
| |
| log = fls(num_online_cpus()); |
| |
| return log * (32UL * 1024 * 1024 / PAGE_SIZE); |
| } |
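| |
| /* |
| * Worked example (illustrative only): with 4 KB pages, 32 MB corresponds |
| * to 8192 pages. On a machine with 16 online CPUs, fls(16) == 5, so |
| * lazy_max_pages() returns 5 * 8192 = 40960 pages, i.e. roughly 160 MB |
| * of lazily freed virtual address space is gathered before a drain is |
| * triggered. |
| */ |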
| |
| static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); |
| |
| /* |
| * Serialize vmap purging. There is no actual critical section protected |
| * by this lock, but we want to avoid concurrent calls for performance |
| * reasons and to make pcpu_get_vm_areas() more deterministic. |
| */ |
| static DEFINE_MUTEX(vmap_purge_lock); |
| |
| /* for per-CPU blocks */ |
| static void purge_fragmented_blocks_allcpus(void); |
| static cpumask_t purge_nodes; |
| |
| static void |
| reclaim_list_global(struct list_head *head) |
| { |
| struct vmap_area *va, *n; |
| |
| if (list_empty(head)) |
| return; |
| |
| spin_lock(&free_vmap_area_lock); |
| list_for_each_entry_safe(va, n, head, list) |
| merge_or_add_vmap_area_augment(va, |
| &free_vmap_area_root, &free_vmap_area_list); |
| spin_unlock(&free_vmap_area_lock); |
| } |
| |
| static void |
| decay_va_pool_node(struct vmap_node *vn, bool full_decay) |
| { |
| struct vmap_area *va, *nva; |
| struct list_head decay_list; |
| struct rb_root decay_root; |
| unsigned long n_decay; |
| int i; |
| |
| decay_root = RB_ROOT; |
| INIT_LIST_HEAD(&decay_list); |
| |
| for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { |
| struct list_head tmp_list; |
| |
| if (list_empty(&vn->pool[i].head)) |
| continue; |
| |
| INIT_LIST_HEAD(&tmp_list); |
| |
| /* Detach the pool, so no-one can access it. */ |
| spin_lock(&vn->pool_lock); |
| list_replace_init(&vn->pool[i].head, &tmp_list); |
| spin_unlock(&vn->pool_lock); |
| |
| if (full_decay) |
| WRITE_ONCE(vn->pool[i].len, 0); |
| |
| /* Decay the pool by ~25% of the remaining objects. */ |
| n_decay = vn->pool[i].len >> 2; |
| |
| list_for_each_entry_safe(va, nva, &tmp_list, list) { |
| list_del_init(&va->list); |
| merge_or_add_vmap_area(va, &decay_root, &decay_list); |
| |
| if (!full_decay) { |
| WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1); |
| |
| if (!--n_decay) |
| break; |
| } |
| } |
| |
| /* |
| * Reattach the pool if it has only been partly decayed. |
| * Note that no other context is supposed to populate the |
| * pool meanwhile, therefore a simple list replace operation |
| * is sufficient here. |
| */ |
| if (!full_decay && !list_empty(&tmp_list)) { |
| spin_lock(&vn->pool_lock); |
| list_replace_init(&tmp_list, &vn->pool[i].head); |
| spin_unlock(&vn->pool_lock); |
| } |
| } |
| |
| reclaim_list_global(&decay_list); |
| } |
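| |
| /* |
| * Example of a partial decay (illustrative only): if a pool holds 64 |
| * objects, n_decay = 64 >> 2 = 16, so 16 areas are detached and merged |
| * back into the global free tree while the pool length drops to 48; a |
| * full decay releases all 64 entries and resets the length to zero. |
| */ |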
| |
| static void purge_vmap_node(struct work_struct *work) |
| { |
| struct vmap_node *vn = container_of(work, |
| struct vmap_node, purge_work); |
| struct vmap_area *va, *n_va; |
| LIST_HEAD(local_list); |
| |
| vn->nr_purged = 0; |
| |
| list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { |
| unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; |
| unsigned long orig_start = va->va_start; |
| unsigned long orig_end = va->va_end; |
| unsigned int vn_id = decode_vn_id(va->flags); |
| |
| list_del_init(&va->list); |
| |
| if (is_vmalloc_or_module_addr((void *)orig_start)) |
| kasan_release_vmalloc(orig_start, orig_end, |
| va->va_start, va->va_end); |
| |
| atomic_long_sub(nr, &vmap_lazy_nr); |
| vn->nr_purged++; |
| |
| if (is_vn_id_valid(vn_id) && !vn->skip_populate) |
| if (node_pool_add_va(vn, va)) |
| continue; |
| |
| /* Go back to global. */ |
| list_add(&va->list, &local_list); |
| } |
| |
| reclaim_list_global(&local_list); |
| } |
| |
| /* |
| * Purges all lazily-freed vmap areas. |
| */ |
| static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, |
| bool full_pool_decay) |
| { |
| unsigned long nr_purged_areas = 0; |
| unsigned int nr_purge_helpers; |
| unsigned int nr_purge_nodes; |
| struct vmap_node *vn; |
| int i; |
| |
| lockdep_assert_held(&vmap_purge_lock); |
| |
| /* |
| * Use a cpumask to mark which nodes have to be processed. |
| */ |
| purge_nodes = CPU_MASK_NONE; |
| |
| for (i = 0; i < nr_vmap_nodes; i++) { |
| vn = &vmap_nodes[i]; |
| |
| INIT_LIST_HEAD(&vn->purge_list); |
| vn->skip_populate = full_pool_decay; |
| decay_va_pool_node(vn, full_pool_decay); |
| |
| if (RB_EMPTY_ROOT(&vn->lazy.root)) |
| continue; |
| |
| spin_lock(&vn->lazy.lock); |
| WRITE_ONCE(vn->lazy.root.rb_node, NULL); |
| list_replace_init(&vn->lazy.head, &vn->purge_list); |
| spin_unlock(&vn->lazy.lock); |
| |
| start = min(start, list_first_entry(&vn->purge_list, |
| struct vmap_area, list)->va_start); |
| |
| end = max(end, list_last_entry(&vn->purge_list, |
| struct vmap_area, list)->va_end); |
| |
| cpumask_set_cpu(i, &purge_nodes); |
| } |
| |
| nr_purge_nodes = cpumask_weight(&purge_nodes); |
| if (nr_purge_nodes > 0) { |
| flush_tlb_kernel_range(start, end); |
| |
| /* One extra helper is spawned per full lazy_max_pages() set, minus one. */ |
| nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); |
| nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; |
| |
| for_each_cpu(i, &purge_nodes) { |
| vn = &vmap_nodes[i]; |
| |
| if (nr_purge_helpers > 0) { |
| INIT_WORK(&vn->purge_work, purge_vmap_node); |
| |
| if (cpumask_test_cpu(i, cpu_online_mask)) |
| schedule_work_on(i, &vn->purge_work); |
| else |
| schedule_work(&vn->purge_work); |
| |
| nr_purge_helpers--; |
| } else { |
| vn->purge_work.func = NULL; |
| purge_vmap_node(&vn->purge_work); |
| nr_purged_areas += vn->nr_purged; |
| } |
| } |
| |
| for_each_cpu(i, &purge_nodes) { |
| vn = &vmap_nodes[i]; |
| |
| if (vn->purge_work.func) { |
| flush_work(&vn->purge_work); |
| nr_purged_areas += vn->nr_purged; |
| } |
| } |
| } |
| |
| trace_purge_vmap_area_lazy(start, end, nr_purged_areas); |
| return nr_purged_areas > 0; |
| } |
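| |
| /* |
| * Example of the helper calculation above (illustrative only): if |
| * vmap_lazy_nr amounts to three full lazy_max_pages() sets and five |
| * nodes have lazy areas pending, nr_purge_helpers = clamp(3, 1, 5) - 1 = 2, |
| * so two nodes are purged by workqueue workers while the caller purges |
| * the remaining three inline and then waits for the workers to finish. |
| */ |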
| |
| /* |
| * Reclaim vmap areas by purging fragmented blocks and lazily-freed areas on all nodes. |
| */ |
| static void reclaim_and_purge_vmap_areas(void) |
| { |
| mutex_lock(&vmap_purge_lock); |
| purge_fragmented_blocks_allcpus(); |
| __purge_vmap_area_lazy(ULONG_MAX, 0, true); |
| mutex_unlock(&vmap_purge_lock); |
| } |
| |
| static void drain_vmap_area_work(struct work_struct *work) |
| { |
| mutex_lock(&vmap_purge_lock); |
| __purge_vmap_area_lazy(ULONG_MAX, 0, false); |
| mutex_unlock(&vmap_purge_lock); |
| } |
| |
| /* |
| * Free a vmap area; the caller ensures that the area has been unmapped, |
| * unlinked and that flush_cache_vunmap() has been called for the correct |
| * range beforehand. |
| */ |
| static void free_vmap_area_noflush(struct vmap_area *va) |
| { |
| unsigned long nr_lazy_max = lazy_max_pages(); |
| unsigned long va_start = va->va_start; |
| unsigned int vn_id = decode_vn_id(va->flags); |
| struct vmap_node *vn; |
| unsigned long nr_lazy; |
| |
| if (WARN_ON_ONCE(!list_empty(&va->list))) |
| return; |
| |
| nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> |
| PAGE_SHIFT, &vmap_lazy_nr); |
| |
| /* |
| * If it was requested by a certain node we would like to |
| * return it to that node, i.e. to its pool for later reuse. |
| */ |
| vn = is_vn_id_valid(vn_id) ? |
| id_to_node(vn_id) : addr_to_node(va->va_start); |
| |
| spin_lock(&vn->lazy.lock); |
| insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); |
| spin_unlock(&vn->lazy.lock); |
| |
| trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); |
| |
| /* After this point, we may free va at any time */ |
| if (unlikely(nr_lazy > nr_lazy_max)) |
| schedule_work(&drain_vmap_work); |
| } |
| |
| /* |
| * Free and unmap a vmap area |
| */ |
| static void free_unmap_vmap_area(struct vmap_area *va) |
| { |
| flush_cache_vunmap(va->va_start, va->va_end); |
| vunmap_range_noflush(va->va_start, va->va_end); |
| if (debug_pagealloc_enabled_static()) |
| flush_tlb_kernel_range(va->va_start, va->va_end); |
| |
| free_vmap_area_noflush(va); |
| } |
| |
| struct vmap_area *find_vmap_area(unsigned long addr) |
| { |
| struct vmap_node *vn; |
| struct vmap_area *va; |
| int i, j; |
| |
| if (unlikely(!vmap_initialized)) |
| return NULL; |
| |
| /* |
| * addr_to_node_id(addr) converts an address to the index of the |
| * node where a VA is located. If a VA spans several zones and the |
| * passed addr is not the same as va->va_start, which is uncommon, |
| * we may need to scan extra nodes. See an example: |
| * |
| * <----va----> |
| * -|-----|-----|-----|-----|- |
| * 1 2 0 1 |
| * |
| * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If the |
| * passed addr falls within node 2 or 0, extra work is needed. |
| */ |
| i = j = addr_to_node_id(addr); |
| do { |
| vn = &vmap_nodes[i]; |
| |
| spin_lock(&vn->busy.lock); |
| va = __find_vmap_area(addr, &vn->busy.root); |
| spin_unlock(&vn->busy.lock); |
| |
| if (va) |
| return va; |
| } while ((i = (i + 1) % nr_vmap_nodes) != j); |
| |
| return NULL; |
| } |
| |
| static struct vmap_area *find_unlink_vmap_area(unsigned long addr) |
| { |
| struct vmap_node *vn; |
| struct vmap_area *va; |
| int i, j; |
| |
| /* |
| * See the comment in find_vmap_area() about the loop. |
| */ |
| i = j = addr_to_node_id(addr); |
| do { |
| vn = &vmap_nodes[i]; |
| |
| spin_lock(&vn->busy.lock); |
| va = __find_vmap_area(addr, &vn->busy.root); |
| if (va) |
| unlink_va(va, &vn->busy.root); |
| spin_unlock(&vn->busy.lock); |
| |
| if (va) |
| return va; |
| } while ((i = (i + 1) % nr_vmap_nodes) != j); |
| |
| return NULL; |
| } |
| |
| /*** Per cpu kva allocator ***/ |
| |
| /* |
| * vmap space is limited, especially on 32-bit architectures. Ensure there is |
| * room for at least 16 percpu vmap blocks per CPU. |
| */ |
| /* |
| * If VMALLOC_START and VMALLOC_END were constants, we would simply |
| * #define VMALLOC_SPACE as (VMALLOC_END - VMALLOC_START). Since they are |
| * not, guess instead (we just need a rough idea). |
| */ |
| #if BITS_PER_LONG == 32 |
| #define VMALLOC_SPACE (128UL*1024*1024) |
| #else |
| #define VMALLOC_SPACE (128UL*1024*1024*1024) |
| #endif |
| |
| #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) |
| #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ |
| #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ |
| #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) |
| #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ |
| #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ |
| #define VMAP_BBMAP_BITS \ |
| VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ |
| VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ |
| VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) |
| |
| #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) |
| |
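| /* |
| * Example sizing (illustrative only, assuming a 64-bit kernel with 4 KB |
| * pages and NR_CPUS = 64): VMALLOC_PAGES is 128 GB / 4 KB = 32M pages, |
| * and 32M / 64 / 16 = 32768 bits, which is capped at VMAP_BBMAP_BITS_MAX, |
| * so VMAP_BBMAP_BITS = 1024 and VMAP_BLOCK_SIZE = 4 MB. |
| */ |
| |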
| /* |
| * Purge threshold to prevent overeager purging of fragmented blocks for |
| * regular operations: Purge if vb->free is less than 1/4 of the capacity. |
| */ |
| #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) |
| |
| #define VMAP_RAM 0x1 /* indicates vm_map_ram area */ |
| #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type */ |
| #define VMAP_FLAGS_MASK 0x3 |
| |
| struct vmap_block_queue { |
| spinlock_t lock; |
| struct list_head free; |
| |
| /* |
| * An xarray requires extra memory to be allocated |
| * dynamically. If that becomes an issue, an rb-tree |
| * can be used instead. |
| */ |
| struct xarray vmap_blocks; |
| }; |
| |
| struct vmap_block { |
| spinlock_t lock; |
| struct vmap_area *va; |
| unsigned long free, dirty; |
| DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); |
| unsigned long dirty_min, dirty_max; /*< dirty range */ |
| struct list_head free_list; |
| struct rcu_head rcu_head; |
| struct list_head purge; |
| unsigned int cpu; |
| }; |
| |
| /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ |
| static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); |
| |
| /* |
| * In order to provide fast access to any "vmap_block" associated with a |
| * specific address, a hash is used. |
| * |
| * A per-cpu vmap_block_queue is used in two ways: to serialize |
| * access to the free block chains among CPUs (alloc path) and to |
| * act as a vmap_block hash (alloc/free paths). That is, the already |
| * existing per-cpu array is overloaded and also used as a hash |
| * table. When used as a hash, a 'cpu' passed to per_cpu() is not |
| * actually a CPU but rather a hash index. |
| * |
| * The hash function is addr_to_vb_xa(), which hashes any address to |
| * the specific hash index it belongs to; the per_cpu() macro is then |
| * used to access the array element at the generated index. |
| * |
| * An example: |
| * |
| * CPU_1 CPU_2 CPU_0 |
| * | | | |
| * V V V |
| * 0 10 20 30 40 50 60 |
| * |------|------|------|------|------|------|...<vmap address space> |
| * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 |
| * |
| * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to the CPU0 zone, thus |
| * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock; |
| * |
| * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to the CPU1 zone, thus |
| * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock; |
| * |
| * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to the CPU2 zone, thus |
| * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock. |
| * |
| * This technique almost always avoids lock contention on insert/remove, |
| * however xarray spinlocks protect against any contention that remains. |
| */ |
| static struct xarray * |
| addr_to_vb_xa(unsigned long addr) |
| { |
| int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; |
| |
| /* |
| * Note that nr_cpu_ids corresponds to the highest possible |
| * set bit, i.e. cpumask_next() is never invoked when the |
| * index equals nr_cpu_ids - 1. |
| */ |
| if (!cpu_possible(index)) |
| index = cpumask_next(index, cpu_possible_mask); |
| |
| return &per_cpu(vmap_block_queue, index).vmap_blocks; |
| } |
| |
| /* |
| * We should probably have a fallback mechanism to allocate virtual memory |
| * out of partially filled vmap blocks. However vmap block sizing should be |
| * fairly reasonable according to the vmalloc size, so it shouldn't be a |
| * big problem. |
| */ |
| |
| static unsigned long addr_to_vb_idx(unsigned long addr) |
| { |
| addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); |
| addr /= VMAP_BLOCK_SIZE; |
| return addr; |
| } |
| |
| static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) |
| { |
| unsigned long addr; |
| |
| addr = va_start + (pages_off << PAGE_SHIFT); |
| BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); |
| return (void *)addr; |
| } |
| |
| /** |
| * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in |
| * this block. The number of pages cannot exceed VMAP_BBMAP_BITS. |
| * @order: allocate 2^order pages in the newly allocated block |
| * @gfp_mask: flags for the page level allocator |
| * |
| * Return: virtual address in a newly allocated block or ERR_PTR(-errno) |
| */ |
| static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) |
| { |
| struct vmap_block_queue *vbq; |
| struct vmap_block *vb; |
| struct vmap_area *va; |
| struct xarray *xa; |
| unsigned long vb_idx; |
| int node, err; |
| void *vaddr; |
| |
| node = numa_node_id(); |
| |
| vb = kmalloc_node(sizeof(struct vmap_block), |
| gfp_mask & GFP_RECLAIM_MASK, node); |
| if (unlikely(!vb)) |
| return ERR_PTR(-ENOMEM); |
| |
| va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, |
| VMALLOC_START, VMALLOC_END, |
| node, gfp_mask, |
| VMAP_RAM|VMAP_BLOCK, NULL); |
| if (IS_ERR(va)) { |
| kfree(vb); |
| return ERR_CAST(va); |
| } |
| |
| vaddr = vmap_block_vaddr(va->va_start, 0); |
| spin_lock_init(&vb->lock); |
| vb->va = va; |
| /* At least something should be left free */ |
| BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); |
| bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); |
| vb->free = VMAP_BBMAP_BITS - (1UL << order); |
| vb->dirty = 0; |
| vb->dirty_min = VMAP_BBMAP_BITS; |
| vb->dirty_max = 0; |
| bitmap_set(vb->used_map, 0, (1UL << order)); |
| INIT_LIST_HEAD(&vb->free_list); |
| |
| xa = addr_to_vb_xa(va->va_start); |
| vb_idx = addr_to_vb_idx(va->va_start); |
| err = xa_insert(xa, vb_idx, vb, gfp_mask); |
| if (err) { |
| kfree(vb); |
| free_vmap_area(va); |
| return ERR_PTR(err); |
| } |
| /* |
| * list_add_tail_rcu may happen on a core other than |
| * vb->cpu due to task migration, which is safe because |
| * list_add_tail_rcu ensures the list's integrity |
| * together with list_for_each_entry_rcu on the read |
| * side. |
| */ |
| vb->cpu = raw_smp_processor_id(); |
| vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); |
| spin_lock(&vbq->lock); |
| list_add_tail_rcu(&vb->free_list, &vbq->free); |
| spin_unlock(&vbq->lock); |
| |
| return vaddr; |
| } |
| |
| static void free_vmap_block(struct vmap_block *vb) |
| { |
| struct vmap_node *vn; |
| struct vmap_block *tmp; |
| struct xarray *xa; |
| |
| xa = addr_to_vb_xa(vb->va->va_start); |
| tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); |
| BUG_ON(tmp != vb); |
| |
| vn = addr_to_node(vb->va->va_start); |
| spin_lock(&vn->busy.lock); |
| unlink_va(vb->va, &vn->busy.root); |
| spin_unlock(&vn->busy.lock); |
| |
| free_vmap_area_noflush(vb->va); |
| kfree_rcu(vb, rcu_head); |
| } |
| |
| static bool purge_fragmented_block(struct vmap_block *vb, |
| struct list_head *purge_list, bool force_purge) |
| { |
| struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu); |
| |
| if (vb->free + vb->dirty != VMAP_BBMAP_BITS || |
| vb->dirty == VMAP_BBMAP_BITS) |
| return false; |
| |
| /* Don't overeagerly purge usable blocks unless requested */ |
| if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) |
| return false; |
| |
| /* prevent further allocs after releasing lock */ |
| WRITE_ONCE(vb->free, 0); |
| /* prevent purging it again */ |
| WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); |
| vb->dirty_min = 0; |
| vb->dirty_max = VMAP_BBMAP_BITS; |
| spin_lock(&vbq->lock); |
| list_del_rcu(&vb->free_list); |
| spin_unlock(&vbq->lock); |
| list_add_tail(&vb->purge, purge_list); |
| return true; |
| } |
| |
| static void free_purged_blocks(struct list_head *purge_list) |
| { |
| struct vmap_block *vb, *n_vb; |
| |
| list_for_each_entry_safe(vb, n_vb, purge_list, purge) { |
| list_del(&vb->purge); |
| free_vmap_block(vb); |
| } |
| } |
| |
| static void purge_fragmented_blocks(int cpu) |
| { |
| LIST_HEAD(purge); |
| struct vmap_block *vb; |
| struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(vb, &vbq->free, free_list) { |
| unsigned long free = READ_ONCE(vb->free); |
| unsigned long dirty = READ_ONCE(vb->dirty); |
| |
| if (free + dirty != VMAP_BBMAP_BITS || |
| dirty == VMAP_BBMAP_BITS) |
| continue; |
| |
| spin_lock(&vb->lock); |
| purge_fragmented_block(vb, &purge, true); |
| spin_unlock(&vb->lock); |
| } |
| rcu_read_unlock(); |
| free_purged_blocks(&purge); |
| } |
| |
| static void purge_fragmented_blocks_allcpus(void) |
| { |
| int cpu; |
| |
| for_each_possible_cpu(cpu) |
| purge_fragmented_blocks(cpu); |
| } |
| |
| static void *vb_alloc(unsigned long size, gfp_t gfp_mask) |
| { |
| struct vmap_block_queue *vbq; |
| struct vmap_block *vb; |
| void *vaddr = NULL; |
| unsigned int order; |
| |
| BUG_ON(offset_in_page(size)); |
| BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); |
| if (WARN_ON(size == 0)) { |
| /* |
| * Allocating 0 bytes isn't what the caller wants since |
| * get_order(0) returns a funny result. Just warn and bail |
| * out early. |
| */ |
| return ERR_PTR(-EINVAL); |
| } |
| order = get_order(size); |
| |
| rcu_read_lock(); |
| vbq = raw_cpu_ptr(&vmap_block_queue); |
| list_for_each_entry_rcu(vb, &vbq->free, free_list) { |
| unsigned long pages_off; |
| |
| if (READ_ONCE(vb->free) < (1UL << order)) |
| continue; |
| |
| spin_lock(&vb->lock); |
| if (vb->free < (1UL << order)) { |
| spin_unlock(&vb->lock); |
| continue; |
| } |
| |
| pages_off = VMAP_BBMAP_BITS - vb->free; |
| vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); |
| WRITE_ONCE(vb->free, vb->free - (1UL << order)); |
| bitmap_set(vb->used_map, pages_off, (1UL << order)); |
| if (vb->free == 0) { |
| spin_lock(&vbq->lock); |
| list_del_rcu(&vb->free_list); |
| spin_unlock(&vbq->lock); |
| } |
| |
| spin_unlock(&vb->lock); |
| break; |
| } |
| |
| rcu_read_unlock(); |
| |
| /* Allocate new block if nothing was found */ |
| if (!vaddr) |
| vaddr = new_vmap_block(order, gfp_mask); |
| |
| return vaddr; |
| } |
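| |
| /* |
| * Example (illustrative only, assuming VMAP_BBMAP_BITS == 1024): a fresh |
| * block created for an order-1 request has bits 0-1 of used_map set and |
| * vb->free == 1022. A following order-2 request in vb_alloc() computes |
| * pages_off = 1024 - 1022 = 2, so it maps at va_start + 2 * PAGE_SIZE, |
| * sets bits 2-5 and leaves vb->free == 1018. |
| */ |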
| |
| static void vb_free(unsigned long addr, unsigned long size) |
| { |
| unsigned long offset; |
| unsigned int order; |
| struct vmap_block *vb; |
| struct xarray *xa; |
| |
| BUG_ON(offset_in_page(size)); |
| BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); |
| |
| flush_cache_vunmap(addr, addr + size); |
| |
| order = get_order(size); |
| offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; |
| |
| xa = addr_to_vb_xa(addr); |
| vb = xa_load(xa, addr_to_vb_idx(addr)); |
| |
| spin_lock(&vb->lock); |
| bitmap_clear(vb->used_map, offset, (1UL << order)); |
| spin_unlock(&vb->lock); |
| |
| vunmap_range_noflush(addr, addr + size); |
| |
| if (debug_pagealloc_enabled_static()) |
| flush_tlb_kernel_range(addr, addr + size); |
| |
| spin_lock(&vb->lock); |
| |
| /* Expand the not yet TLB flushed dirty range */ |
| vb->dirty_min = min(vb->dirty_min, offset); |
| vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); |
| |
| WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); |
| if (vb->dirty == VMAP_BBMAP_BITS) { |
| BUG_ON(vb->free); |
| spin_unlock(&vb->lock); |
| free_vmap_block(vb); |
| } else |
| spin_unlock(&vb->lock); |
| } |
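| |
| /* |
| * Example of the dirty tracking above (illustrative only, assuming |
| * VMAP_BBMAP_BITS == 1024): freeing an order-1 chunk at page offset 2 |
| * widens the not-yet-flushed dirty range to cover at least [2, 4) and |
| * bumps vb->dirty by 2; once vb->dirty reaches 1024 the whole block is |
| * torn down via free_vmap_block(). |
| */ |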
| |
| static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) |
| { |
| LIST_HEAD(purge_list); |
| int cpu; |
| |
| if (unlikely(!vmap_initialized)) |
| return; |
| |
| mutex_lock(&vmap_purge_lock); |
| |
| for_each_possible_cpu(cpu) { |
| struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); |
| struct vmap_block *vb; |
| unsigned long idx; |
| |
| rcu_read_lock(); |
| xa_for_each(&vbq->vmap_blocks, idx, vb) { |
| spin_lock(&vb->lock); |
| |
| /* |
| * Try to purge a fragmented block first. If it's |
| * not purgeable, check whether there is dirty |
| * space to be flushed. |
| */ |
| if (!purge_fragmented_block(vb, &purge_list, false) && |
| vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { |
| unsigned long va_start = vb->va->va_start; |
| unsigned long s, e; |
| |
| s = va_start + (vb->dirty_min << PAGE_SHIFT); |
| e = va_start + (vb->dirty_max << PAGE_SHIFT); |
| |
| start = min(s, start); |
| end = max(e, end); |
| |
| /* Prevent this range from being flushed again */ |
| vb->dirty_min = VMAP_BBMAP_BITS; |
| vb->dirty_max = 0; |
| |
| flush = 1; |
| } |
| spin_unlock(&vb->lock); |
| } |
| rcu_read_unlock(); |
| } |
| free_purged_blocks(&purge_list); |
| |
| if (!__purge_vmap_area_lazy(start, end, false) && flush) |
| flush_tlb_kernel_range(start, end); |
| mutex_unlock(&vmap_purge_lock); |
| } |
| |
| /** |
| * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer |
| * |
| * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily |
| * to amortize TLB flushing overheads. What this means is that any page you |
| * have now, may, in a former life, have been mapped into kernel virtual |
| * address by the vmap layer and so there might be some CPUs with TLB entries |
| * still referencing that page (additional to the regular 1:1 kernel mapping). |
| * |
| * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can |
| * be sure that none of the pages we have control over will have any aliases |
| * from the vmap layer. |
| */ |
| void vm_unmap_aliases(void) |
| { |
| unsigned long start = ULONG_MAX, end = 0; |
| int flush = 0; |
| |
| _vm_unmap_aliases(start, end, flush); |
| } |
| EXPORT_SYMBOL_GPL(vm_unmap_aliases); |
| |
| /** |
| * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram |
| * @mem: the pointer returned by vm_map_ram |
| * @count: the count passed to that vm_map_ram call (cannot unmap partial) |
| */ |
| void vm_unmap_ram(const void *mem, unsigned int count) |
| { |
| unsigned long size = (unsigned long)count << PAGE_SHIFT; |
| unsigned long addr = (unsigned long)kasan_reset_tag(mem); |
| struct vmap_area *va; |
| |
| might_sleep(); |
| BUG_ON(!addr); |
| BUG_ON(addr < VMALLOC_START); |
| BUG_ON(addr > VMALLOC_END); |
| BUG_ON(!PAGE_ALIGNED(addr)); |
| |
| kasan_poison_vmalloc(mem, size); |
| |
| if (likely(count <= VMAP_MAX_ALLOC)) { |
| debug_check_no_locks_freed(mem, size); |
| vb_free(addr, size); |
| return; |
| } |
| |
| va = find_unlink_vmap_area(addr); |
| if (WARN_ON_ONCE(!va)) |
| return; |
| |
| debug_check_no_locks_freed((void *)va->va_start, |
| (va->va_end - va->va_start)); |
| free_unmap_vmap_area(va); |
| } |
| EXPORT_SYMBOL(vm_unmap_ram); |
| |
| /** |
| * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) |
| * @pages: an array of pointers to the pages to be mapped |
| * @count: number of pages |
| * @node: prefer to allocate data structures on this node |
| * |
| * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be |
| * faster than vmap(). But if you mix long-lived and short-lived objects |
| * with vm_map_ram(), it can consume a lot of address space through |
| * fragmentation (especially on a 32-bit machine), and you may eventually |
| * see allocation failures. Please use this function for short-lived objects. |
| * |
| * Returns: a pointer to the address that has been mapped, or %NULL on failure |
| */ |
| void *vm_map_ram(struct page **pages, unsigned int count, int node) |
| { |
| unsigned long size = (unsigned long)count << PAGE_SHIFT; |
| unsigned long addr; |
| void *mem; |
| |
| if (likely(count <= VMAP_MAX_ALLOC)) { |
| mem = vb_alloc(size, GFP_KERNEL); |
| if (IS_ERR(mem)) |
| return NULL; |
| addr = (unsigned long)mem; |
| } else { |
| struct vmap_area *va; |
| va = alloc_vmap_area(size, PAGE_SIZE, |
| VMALLOC_START, VMALLOC_END, |
| node, GFP_KERNEL, VMAP_RAM, |
| NULL); |
| if (IS_ERR(va)) |
| return NULL; |
| |
| addr = va->va_start; |
| mem = (void *)addr; |
| } |
| |
| if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, |
| pages, PAGE_SHIFT) < 0) { |
| vm_unmap_ram(mem, count); |
| return NULL; |
| } |
| |
| /* |
| * Mark the pages as accessible, now that they are mapped. |
| * With hardware tag-based KASAN, marking is skipped for |
| * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). |
| */ |
| mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); |
| |
| return mem; |
| } |
| EXPORT_SYMBOL(vm_map_ram); |
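| |
| /* |
| * Typical usage sketch (illustrative only; "pages" and "nr" are assumed |
| * to be an already populated page array and its length): |
| * |
| *	void *buf = vm_map_ram(pages, nr, NUMA_NO_NODE); |
| * |
| *	if (buf) { |
| *		... access the nr * PAGE_SIZE contiguous mapping ... |
| *		vm_unmap_ram(buf, nr); |
| *	} |
| */ |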
| |
| static struct vm_struct *vmlist __initdata; |
| |
| static inline unsigned int vm_area_page_order(struct vm_struct *vm) |
| { |
| #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC |
| return vm->page_order; |
| #else |
| return 0; |
| #endif |
| } |
| |
| static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) |
| { |
| #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC |
| vm->page_order = order; |
| #else |
| BUG_ON(order != 0); |
| #endif |
| } |
| |
| /** |
| * vm_area_add_early - add vmap area early during boot |
| * @vm: vm_struct to add |
| * |
| * This function is used to add fixed kernel vm area to vmlist before |
| * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags |
| * should contain proper values and the other fields should be zero. |
| * |
| * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. |
| */ |
| void __init vm_area_add_early(struct vm_struct *vm) |
| { |
| struct vm_struct *tmp, **p; |
| |
| BUG_ON(vmap_initialized); |
| for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { |
| if (tmp->addr >= vm->addr) { |
| BUG_ON(tmp->addr < vm->addr + vm->size); |
| break; |
| } else |
| BUG_ON(tmp->addr + tmp->size > vm->addr); |
| } |
| vm->next = *p; |
| *p = vm; |
| } |
| |
| /** |
| * vm_area_register_early - register vmap area early during boot |
| * @vm: vm_struct to register |
| * @align: requested alignment |
| * |
| * This function is used to register kernel vm area before |
| * vmalloc_init() is called. @vm->size and @vm->flags should contain |
| * proper values on entry and other fields should be zero. On return, |
| * vm->addr contains the allocated address. |
| * |
| * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. |
| */ |
| void __init vm_area_register_early(struct vm_struct *vm, size_t align) |
| { |
| unsigned long addr = ALIGN(VMALLOC_START, align); |
| struct vm_struct *cur, **p; |
| |
| BUG_ON(vmap_initialized); |
| |
| for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { |
| if ((unsigned long)cur->addr - addr >= vm->size) |
| break; |
| addr = ALIGN((unsigned long)cur->addr + cur->size, align); |
| } |
| |
| BUG_ON(addr > VMALLOC_END - vm->size); |
| vm->addr = (void *)addr; |
| vm->next = *p; |
| *p = vm; |
| kasan_populate_early_vm_area_shadow(vm->addr, vm->size); |
| } |
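| |
| /* |
| * Early-boot usage sketch (illustrative only): an architecture that needs |
| * a fixed-size region before vmalloc_init() runs could do: |
| * |
| *	static struct vm_struct early_vm; |
| * |
| *	early_vm.flags = VM_ALLOC; |
| *	early_vm.size = 4 * PAGE_SIZE; |
| *	vm_area_register_early(&early_vm, PAGE_SIZE); |
| * |
| * On return, early_vm.addr holds the reserved address. |
| */ |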
| |
| static void clear_vm_uninitialized_flag(struct vm_struct *vm) |
| { |
| /* |
| * Before removing VM_UNINITIALIZED, |
| * we should make sure that vm has proper values. |
| * Pair with smp_rmb() in show_numa_info(). |
| */ |
| smp_wmb(); |
| vm->flags &= ~VM_UNINITIALIZED; |
| } |
| |
| static struct vm_struct *__get_vm_area_node(unsigned long size, |
| unsigned long align, unsigned long shift, unsigned long flags, |
| unsigned long start, unsigned long end, int node, |
| gfp_t gfp_mask, const void *caller) |
| { |
| struct vmap_area *va; |
| struct vm_struct *area; |
| unsigned long requested_size = size; |
| |
| BUG_ON(in_interrupt()); |
| size = ALIGN(size, 1ul << shift); |
| if (unlikely(!size)) |
| return NULL; |
| |
| if (flags & VM_IOREMAP) |
| align = 1ul << clamp_t(int, get_count_order_long(size), |
| PAGE_SHIFT, IOREMAP_MAX_ORDER); |
| |
| area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); |
| if (unlikely(!area)) |
| return NULL; |
| |
| if (!(flags & VM_NO_GUARD)) |
| size += PAGE_SIZE; |
| |
| area->flags = flags; |
| area->caller = caller; |
| |
| va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); |
| if (IS_ERR(va)) { |
| kfree(area); |
| return NULL; |
| } |
| |
| /* |
| * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a |
| * best-effort approach, as they can be mapped outside of vmalloc code. |
| * For VM_ALLOC mappings, the pages are marked as accessible after |
| * getting mapped in __vmalloc_node_range(). |
| * With hardware tag-based KASAN, marking is skipped for |
| * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). |
| */ |
| if (!(flags & VM_ALLOC)) |
| area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, |
| KASAN_VMALLOC_PROT_NORMAL); |
| |
| return area; |
| } |
| |
| struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, |
| unsigned long start, unsigned long end, |
| const void *caller) |
| { |
| return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, |
| NUMA_NO_NODE, GFP_KERNEL, caller); |
| } |
| |
| /** |
| * get_vm_area - reserve a contiguous kernel virtual area |
| * @size: size of the area |
| * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC |
| * |
| * Search for an area of @size in the kernel virtual mapping area, |
| * and reserve it for our purposes. Returns the area descriptor |
| * on success or %NULL on failure. |
| * |
| * Return: the area descriptor on success or %NULL on failure. |
| */ |
| struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
| { |
| return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, |
| VMALLOC_START, VMALLOC_END, |
| NUMA_NO_NODE, GFP_KERNEL, |
| __builtin_return_address(0)); |
| } |
| |
| struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, |
| const void *caller) |
| { |
| return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, |
| VMALLOC_START, VMALLOC_END, |
| NUMA_NO_NODE,
|