/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPU's that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/nommu-mmap.txt
 *
 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 * Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

static inline __attribute__((format(printf, 1, 2)))
void no_printk(const char *fmt, ...)
{
}

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
int heap_stack_gap = 0;

atomic_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}
EXPORT_SYMBOL(vmtruncate);

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
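/*
 * Editorial example, not part of the original file: a minimal sketch of how
 * kobjsize() might be used from a debugging helper to report the real
 * footprint of an allocation (the backing object can be larger than what
 * the caller asked for).  The name dump_alloc_footprint() is invented here
 * purely for illustration.
 */
static void dump_alloc_footprint(const void *objp, size_t requested)
{
	/* kobjsize() reports the whole backing object, not just 'requested' */
	printk(KERN_DEBUG "alloc %p: asked for %zu, really occupies %u bytes\n",
	       objp, requested, kobjsize(objp));
}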
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;
	int write = !!(flags & GUP_FLAGS_WRITE);
	int force = !!(flags & GUP_FLAGS_FORCE);
	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    (!ignore && !(vm_flags & vma->vm_flags)))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count
 *   of a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	return __get_user_pages(tsk, mm, start, len, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
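/*
 * Editorial example, not part of the original file: a hedged sketch of the
 * usual get_user_pages() calling convention as seen from a driver.  On
 * nommu this boils down to the virt_to_page() walk above, but callers are
 * written the same way as on MMU kernels.  pin_one_page() is an invented
 * name for illustration.
 */
static struct page *pin_one_page(unsigned long uaddr, int write)
{
	struct page *page;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, 1,
			     write, 0, &page, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 1)
		return NULL;
	return page;	/* drop the reference with page_cache_release(page) */
}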
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size from
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags,
	   pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node,
		 pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
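/*
 * Editorial example, not part of the original file: vmalloc_user() pairs
 * with remap_vmalloc_range() in a driver's mmap handler; the VM_USERMAP
 * flag set above is what makes that remap permissible.  A minimal sketch,
 * assuming a hypothetical driver-private buffer 'my_buf' that was
 * allocated earlier with vmalloc_user():
 */
static void *my_buf;	/* hypothetical: my_buf = vmalloc_user(MY_BUF_SIZE) */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* hand the zeroed, VM_USERMAP-tagged buffer to userspace */
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}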
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	vm_region_jar = kmem_cache_create("vm_region_jar",
					  sizeof(struct vm_region), 0,
					  SLAB_PANIC, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
					   sizeof(struct vm_area_struct), 0,
					   SLAB_PANIC, NULL);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)