亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? nommu.c

?? 最新最穩定的Linux內存管理模塊源代碼
?? C
?? 第 1 頁 / 共 4 頁
字號:
/* *  linux/mm/nommu.c * *  Replacement code for mm functions to support CPU's that don't *  have any form of memory management unit (thus no virtual memory). * *  See Documentation/nommu-mmap.txt * *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com> *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com> *  Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org> */#include <linux/module.h>#include <linux/mm.h>#include <linux/mman.h>#include <linux/swap.h>#include <linux/file.h>#include <linux/highmem.h>#include <linux/pagemap.h>#include <linux/slab.h>#include <linux/vmalloc.h>#include <linux/tracehook.h>#include <linux/blkdev.h>#include <linux/backing-dev.h>#include <linux/mount.h>#include <linux/personality.h>#include <linux/security.h>#include <linux/syscalls.h>#include <asm/uaccess.h>#include <asm/tlb.h>#include <asm/tlbflush.h>#include "internal.h"static inline __attribute__((format(printf, 1, 2)))void no_printk(const char *fmt, ...){}#if 0#define kenter(FMT, ...) \	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)#define kleave(FMT, ...) \	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)#define kdebug(FMT, ...) \	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)#else#define kenter(FMT, ...) \	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)#define kleave(FMT, ...) \	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)#define kdebug(FMT, ...) 
\	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)#endif#include "internal.h"void *high_memory;struct page *mem_map;unsigned long max_mapnr;unsigned long num_physpages;atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */int sysctl_overcommit_ratio = 50; /* default is 50% */int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;int sysctl_nr_trim_pages = 1; /* page trimming behaviour */int heap_stack_gap = 0;atomic_t mmap_pages_allocated;EXPORT_SYMBOL(mem_map);EXPORT_SYMBOL(num_physpages);/* list of mapped, potentially shareable regions */static struct kmem_cache *vm_region_jar;struct rb_root nommu_region_tree = RB_ROOT;DECLARE_RWSEM(nommu_region_sem);struct vm_operations_struct generic_file_vm_ops = {};/* * Handle all mappings that got truncated by a "truncate()" * system call. * * NOTE! We have to be ready to update the memory sharing * between the file and the memory map for a potential last * incomplete page.  Ugly, but necessary. */int vmtruncate(struct inode *inode, loff_t offset){	struct address_space *mapping = inode->i_mapping;	unsigned long limit;	if (inode->i_size < offset)		goto do_expand;	i_size_write(inode, offset);	truncate_inode_pages(mapping, offset);	goto out_truncate;do_expand:	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;	if (limit != RLIM_INFINITY && offset > limit)		goto out_sig;	if (offset > inode->i_sb->s_maxbytes)		goto out;	i_size_write(inode, offset);out_truncate:	if (inode->i_op->truncate)		inode->i_op->truncate(inode);	return 0;out_sig:	send_sig(SIGXFSZ, current, 0);out:	return -EFBIG;}EXPORT_SYMBOL(vmtruncate);/* * Return the total memory allocated for this pointer, not * just what the caller asked for. * * Doesn't have to be accurate, i.e. may have races. 
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

/*
 * Pin the pages covering len pages from start and report the VMA backing
 * each one.  With no MMU there is nothing to fault in: each page is looked
 * up directly via find_vma()/virt_to_page().  Returns the number of pages
 * processed, or -EFAULT if the very first page has no acceptable mapping.
 */
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;
	int write = !!(flags & GUP_FLAGS_WRITE);
	int force = !!(flags & GUP_FLAGS_FORCE);
	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    (!ignore && !(vm_flags & vma->vm_flags)))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	/* partial progress is reported; only a first-page failure faults
	 * (GNU "?:" keeps i when it is non-zero) */
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int len, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	/* translate the legacy write/force booleans into GUP_FLAGS_* bits */
	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	return __get_user_pages(tsk, mm,
				start, len, flags,
				pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

/*
 * On nommu, __vmalloc() below is backed by kmalloc(), so freeing a
 * vmalloc'd block is just kfree().
 */
void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

/*
 * Allocate zeroed memory intended for mapping to userspace: the VMA that
 * covers the returned block (if any) is tagged VM_USERMAP so that later
 * remap_vmalloc_range() calls will accept it.
 */
void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/* no MMU: a vmalloc address is an ordinary kernel address, so the page
 * is found by direct lookup */
struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

/* vmalloc memory is directly addressable here, so reading is a plain copy */
long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow: clamp count so addr + count cannot wrap */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return(count);
}

/*
 *	vmalloc
-  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/* the node hint is ignored here: the allocation falls through to plain
 * vmalloc() */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Mapping an arbitrary page array into a contiguous virtual range needs
 * an MMU, so the vmap family cannot be implemented here: any caller is
 * a hard bug on nommu.
 */
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

/* nothing to do: there are no lazily-unmapped aliases without an MMU */
void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}

/* inserting individual pages into a user VMA is not supported here */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	/* requests outside [start_brk, context.end_brk] leave the break
	 * unchanged and just report the current value */
	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	vm_region_jar = kmem_cache_create("vm_region_jar",
					  sizeof(struct vm_region), 0,
					  SLAB_PANIC, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
					   sizeof(struct vm_area_struct), 0,
					   SLAB_PANIC, NULL);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
精品三级在线观看| 亚洲美女免费在线| 中文字幕中文字幕一区| 亚洲人成网站在线| 日本色综合中文字幕| 精品在线播放午夜| 国产凹凸在线观看一区二区| 蜜臀av一区二区| 国产福利精品导航| 91日韩在线专区| 欧美高清激情brazzers| 日韩美女视频在线| 久久久777精品电影网影网| 国产精品素人一区二区| 亚洲女人小视频在线观看| 亚洲电影一区二区| 精品一区二区成人精品| 丁香亚洲综合激情啪啪综合| 色婷婷精品大在线视频| 91精品国产一区二区三区香蕉| 日韩女同互慰一区二区| 国产三级精品在线| 亚洲免费观看在线观看| 日本aⅴ免费视频一区二区三区| 国产一区 二区 三区一级| av电影在线观看完整版一区二区 | 欧美特级限制片免费在线观看| 在线成人av影院| 久久久www免费人成精品| 亚洲日本成人在线观看| 美女视频网站黄色亚洲| av电影一区二区| 欧美日韩一区二区三区在线| 日韩精品在线看片z| 中文字幕中文在线不卡住| 日韩成人精品在线观看| 成人免费视频免费观看| 欧美精品视频www在线观看| 久久精品亚洲一区二区三区浴池| 亚洲精品视频在线观看免费| 久久精品国产亚洲一区二区三区| 不卡大黄网站免费看| 在线电影一区二区三区| 国产精品成人免费精品自在线观看| 天堂午夜影视日韩欧美一区二区| 国产成人亚洲精品青草天美| 欧美高清一级片在线| 国产精品毛片大码女人| 日一区二区三区| av福利精品导航| 2020国产精品久久精品美国| 亚洲一区二区免费视频| 成人午夜激情片| 日韩一区二区电影| 一区二区三区免费| 粉嫩aⅴ一区二区三区四区| 51久久夜色精品国产麻豆| 中文字幕一区二区三区在线播放| 麻豆精品一区二区| 在线观看国产一区二区| 国产精品美日韩| 久久福利视频一区二区| 欧美日韩在线不卡| 日韩美女精品在线| 国产成人综合亚洲网站| 日韩免费电影网站| 午夜欧美大尺度福利影院在线看| 99精品久久99久久久久| 欧美一区二区精美| 婷婷成人综合网| 欧美中文字幕一区| 亚洲日本青草视频在线怡红院| 国产99精品在线观看| 精品国内片67194| 日本中文字幕一区二区有限公司| 日本高清免费不卡视频| 专区另类欧美日韩| 不卡一区在线观看| 国产精品―色哟哟| 欧美一区二区三区免费| 亚洲国产人成综合网站| 日本道免费精品一区二区三区| 中文字幕乱码久久午夜不卡| 国产乱码精品一品二品| 欧美精品一区二区三区在线播放| 免费观看91视频大全| 91精品国产综合久久久久久久久久 | 国产精品国产三级国产普通话蜜臀| 国产呦精品一区二区三区网站| 日韩视频国产视频| 日韩高清在线一区| 欧美一级xxx| 青青草国产精品97视觉盛宴| 日韩欧美高清在线| 极品少妇xxxx偷拍精品少妇| 日韩美女视频在线| 国产在线播精品第三| 久久久亚洲精品一区二区三区 | 欧美色爱综合网| 亚洲私人影院在线观看| 99国产精品一区| 亚洲乱码国产乱码精品精的特点| 色综合中文字幕| 亚洲一区在线观看网站| 欧美日韩亚洲综合| 日韩理论片网站| 欧美三级电影在线观看| 日韩不卡在线观看日韩不卡视频| 日韩欧美一区二区不卡| 国产在线观看一区二区| 久久超碰97人人做人人爱| 久久久久久亚洲综合影院红桃| 久久99国产乱子伦精品免费| 欧美成人一区二区三区片免费| 久久精品国产亚洲高清剧情介绍| 日韩一区二区视频在线观看| 精品影视av免费| 国产欧美日韩精品a在线观看| 国产精品99久久久久久有的能看| 国产欧美中文在线| 91社区在线播放| 日韩精彩视频在线观看| 欧美精品久久99| 黄页视频在线91| 国产精品免费免费| 欧美日韩国产高清一区二区三区 | 麻豆91在线观看| 国产亚洲精品超碰| 在线免费观看日本一区| 日韩**一区毛片| 国产精品蜜臀在线观看| 99re成人在线| 亚洲成a人v欧美综合天堂下载| 精品三级av在线| 91亚洲资源网| 日本不卡不码高清免费观看| 中文字幕高清一区| 色欧美片视频在线观看在线视频| 亚洲图片欧美一区| 日韩一本二本av| 粉嫩久久99精品久久久久久夜| 亚洲激情自拍视频| 91精品国产乱| 粉嫩av一区二区三区在线播放 | 中文字幕亚洲视频| 欧美一区三区四区| 不卡视频免费播放| 日韩成人精品在线| 丰满放荡岳乱妇91ww| 亚洲一区二区三区小说| 欧美不卡一二三| 成人黄色软件下载| 五月天丁香久久| 亚洲精品一区二区三区影院 | 国产欧美一区二区精品性色 | 久久影院午夜论| 欧美视频中文字幕| 国产.欧美.日韩| 亚洲成a人片综合在线| 中文乱码免费一区二区| 欧美一区二区三区思思人| thepron国产精品| 久久成人麻豆午夜电影| 
一区二区国产盗摄色噜噜| 国产日韩欧美一区二区三区乱码| 538prom精品视频线放| 色一情一乱一乱一91av| 粉嫩13p一区二区三区| 日本不卡视频一二三区| 亚洲电影一区二区三区| 国产精品久久久久久久久免费樱桃 | 一区二区三区av电影 | 亚洲精品日产精品乱码不卡| 精品国产乱码久久久久久图片| 欧美日韩一区不卡| 97精品视频在线观看自产线路二| 国产精品主播直播| 免费在线一区观看| 亚洲午夜影视影院在线观看| 亚洲天堂免费看| 国产欧美精品在线观看| 欧美大胆人体bbbb| 在线播放中文字幕一区| 欧美在线小视频| 91毛片在线观看| 成人黄色a**站在线观看| 国产一区二区剧情av在线| 日韩av不卡一区二区| 午夜电影一区二区三区| 国产精品大尺度| 国产日韩一级二级三级| 精品国产人成亚洲区| 日韩一区二区不卡| 日韩欧美资源站| 日韩一区二区免费视频| 日韩午夜激情电影| 欧美大片日本大片免费观看| 精品日韩在线观看| 精品少妇一区二区三区| 欧美成人国产一区二区|