root/include/linux/highmem.h

#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

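/*
 * An architecture that must flush anonymous pages by hand defines
 * ARCH_HAS_FLUSH_ANON_PAGE and supplies its own flush_anon_page();
 * the stub below is the no-op default for everyone else.
 */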
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif
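
/*
 * Likewise, architectures whose caches need explicit maintenance after
 * the kernel writes to a page define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 * and provide real implementations; these default stubs are no-ops.
 */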

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

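/*
 * kmap_to_page() resolves a kernel virtual address, including one
 * returned by kmap(), back to the struct page that backs it.
 */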
struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
        return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)    kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
}

#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)

#define kmap_flush_unused()     do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
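
/*
 * Illustrative usage (a sketch, not part of this header): kmap() may
 * sleep, so it is only valid in process context, and every kmap() must
 * be paired with a kunmap() of the same page ('buf' and 'len' here are
 * hypothetical):
 *
 *      void *kaddr = kmap(page);
 *      memcpy(kaddr, buf, len);
 *      kunmap(page);
 */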

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
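
/*
 * __kmap_atomic_idx is a per-CPU count of atomic kmap slots currently
 * in use; the push/idx/pop helpers below maintain it as a strict LIFO
 * stack, bounds-checked against KM_TYPE_NR under CONFIG_DEBUG_HIGHMEM.
 */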

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx >= KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx(void)
{
        return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        int idx = __this_cpu_dec_return(__kmap_atomic_idx);

        BUG_ON(idx < 0);
#else
        __this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people from trying to call kunmap_atomic() as if it were
 * kunmap(): kunmap_atomic() must be passed the return value of
 * kmap_atomic(), not the struct page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        __kunmap_atomic(addr);                                  \
} while (0)
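
/*
 * Illustrative pairing (a sketch): kunmap_atomic() takes the address
 * returned by kmap_atomic(), and nested mappings must be released in
 * LIFO order:
 *
 *      void *kaddr = kmap_atomic(page);
 *      memset(kaddr, 0, PAGE_SIZE);
 *      kunmap_atomic(kaddr);
 */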


/* When CONFIG_HIGHMEM is not set, these reduce to plain clear_page/copy_page. */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via @movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
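
/*
 * Illustrative caller (a sketch of the anonymous-fault pattern, not a
 * definitive user of this API; 'address' is hypothetical):
 *
 *      struct page *page = alloc_zeroed_user_highpage_movable(vma, address);
 *      if (!page)
 *              return VM_FAULT_OOM;
 */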

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}
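
/*
 * Illustrative use (a sketch): zero everything from a hypothetical
 * 'valid' offset to the end of a partially up-to-date page:
 *
 *      zero_user_segment(page, valid, PAGE_SIZE);
 */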

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif
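
/*
 * Illustrative use (a sketch of the copy-on-write pattern; 'old' and
 * 'addr' are hypothetical):
 *
 *      struct page *new = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 *      if (new)
 *              copy_user_highpage(new, old, addr, vma);
 */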

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_page(vto, vfrom);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */
