
mmap.c

Linux 1.0 source code; the code is clearly organized.
Language: C
/*
 *	linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

static int anon_map(struct inode *, struct file *,
		    unsigned long, size_t, int,
		    unsigned long);
/*
 * description of effects of mapping type and prot in current implementation.
 * this is due to the current handling of page faults in memory.c. the expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) yes	r: (yes) yes	r: (no) yes	r: (no) no
 *		w: (no) yes	w: (no) copy	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) no	x: (no) no	x: (yes) no
 *		
 * MAP_PRIVATE	r: (no) yes	r: (yes) yes	r: (no) yes	r: (no) no
 *		w: (no) copy	w: (no) copy	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) no	x: (no) no	x: (yes) no
 *
 */
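
/*
 * A rough user-space sketch (not part of this file) of what the table
 * above means in practice, assuming the usual mmap(2) prototype and a
 * made-up file name:
 *
 *	int fd = open("data", O_RDWR);
 *	char *p = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
 *	p[0] = 'x';		-- the write hits a private copy; the file
 *				   itself is left untouched
 *
 * With MAP_SHARED the write would be expected to reach the underlying
 * object instead, which is what the PAGE_COPY / PAGE_SHARED choice in
 * do_mmap() below implements.
 */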

#define CODE_SPACE(addr)	\
 (PAGE_ALIGN(addr) < current->start_code + current->end_code)

int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/*
	 * do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */

	if (file != NULL)
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
	/*
	 * obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */

	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* Maybe this works.. Ugly it is. */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/*
	 * determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated but
	 * not yet unmapped; do_munmap() below clears any old mappings first.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;
		else
			mask |= PAGE_SHARED;
	if (!mask)
		return -EINVAL;

	do_munmap(addr, len);	/* Clear old maps */

	if (file)
		error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
	else
		error = anon_map(NULL, NULL, addr, len, mask, off);
	
	if (!error)
		return addr;

	if (!current->errno)
		current->errno = -error;
	return -1;
}

asmlinkage int sys_mmap(unsigned long *buffer)
{
	int error;
	unsigned long flags;
	struct file * file = NULL;

	error = verify_area(VERIFY_READ, buffer, 6*4);
	if (error)
		return error;
	flags = get_fs_long(buffer+3);
	if (!(flags & MAP_ANONYMOUS)) {
		unsigned long fd = get_fs_long(buffer+4);
		if (fd >= NR_OPEN || !(file = current->filp[fd]))
			return -EBADF;
	}
	return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
		get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
}
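
/*
 * Note that sys_mmap() above takes a single pointer to six longs rather
 * than six separate arguments.  A hedged sketch of how a C-library
 * wrapper might pack them (the wrapper shape and syscall stub are
 * assumptions, not taken from this tree):
 *
 *	unsigned long buf[6];
 *	buf[0] = (unsigned long) addr;	-- requested address (honoured only
 *					   with MAP_FIXED here)
 *	buf[1] = len;			-- length in bytes
 *	buf[2] = prot;			-- PROT_* bits
 *	buf[3] = flags;			-- MAP_* bits
 *	buf[4] = fd;			-- ignored when MAP_ANONYMOUS is set
 *	buf[5] = off;			-- byte offset into the object
 *	return syscall(SYS_mmap, buf);
 */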

/*
 * Normal function to fix up a mapping
 * This function is the default for when an area has no specific
 * function.  This may be used as part of a more specific routine.
 * This function works out what part of an area is affected and
 * adjusts the mapping information.  Since the actual page
 * manipulation is done in do_mmap(), none need be done here,
 * though it would probably be more appropriate.
 *
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.
 */
void unmap_fixup(struct vm_area_struct *area,
		 unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
		       area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Unmapping the whole area */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		return;
	}

	/* Work out to one of the ends */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/* Unmapping a hole */
	if (addr > area->vm_start && end < area->vm_end)
	{
		/* Add end mapping -- leave beginning for below */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		insert_vm_struct(current, mpnt);
		area->vm_end = addr;	/* Truncate area */
	}

	/* construct whatever mapping is needed */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	*mpnt = *area;
	insert_vm_struct(current, mpnt);
}
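
/*
 * A worked example of the "hole" case above (addresses are only
 * illustrative): given an area covering 0x10000-0x20000 with
 * vm_offset 0, unmapping 0x14000-0x18000 leaves
 *
 *	0x10000-0x14000		vm_offset 0x0000   (truncated original)
 *	0x18000-0x20000		vm_offset 0x8000   (new area from kmalloc)
 *
 * i.e. the tail area's offset grows by (end - old vm_start) so that its
 * pages still map the same part of the backing object.
 */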


asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
	return -EINVAL; /* Not implemented yet */
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

/*
 * Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardine <jeremy@sw.oz.au>
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so..  The checks here are pretty simple --
	 * every area affected in some way (by any overlap) is put
	 * on the list.  If nothing is put on, nothing is affected.
	 */
	npp = &current->mmap;
	free = NULL;
	for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
		unsigned long end = addr+len;

		if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
		    (addr >= mpnt->vm_end && end > mpnt->vm_end))
		{
			npp = &mpnt->vm_next;
			continue;
		}

		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
	}

	if (free == NULL)
		return 0;

	/*
	 * Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range..
	 * If one of the segments is only being partially unmapped,
	 * it will put new vm_area_struct(s) into the address space.
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);
		else
			unmap_fixup(mpnt, st, end-st);

		kfree(mpnt);
	}

	unmap_page_range(addr, len);
	return 0;
}
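
/*
 * A minimal user-space sketch (calls assumed, not part of this file) of
 * a partial unmap that ends in the hole case handled by unmap_fixup():
 *
 *	p = mmap(0, 3*4096, PROT_READ, MAP_PRIVATE, fd, 0);
 *	munmap(p + 4096, 4096);		-- drop only the middle page
 *
 * do_munmap() moves the one overlapping vm_area_struct onto the free
 * list, unmap_fixup() re-inserts it as two smaller areas, and
 * unmap_page_range() clears the page table entries for the hole.
 */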

/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
  	struct vm_area_struct * mpnt;
	extern struct vm_operations_struct file_mmap;
	struct buffer_head * bh;

	if (prot & PAGE_RW)	/* only PAGE_COW or read-only supported right now */
		return -EINVAL;
	if (!inode->i_sb || !S_ISREG(inode->i_mode))
		return -EACCES;
	if (off & (inode->i_sb->s_blocksize - 1))
		return -EINVAL;
	if (!inode->i_op || !inode->i_op->bmap)
		return -ENOEXEC;
	if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
		return -EACCES;
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	brelse(bh);

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	unmap_page_range(addr, len);	
	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;
	mpnt->vm_offset = off;
	mpnt->vm_ops = &file_mmap;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, NULL, NULL);
	
	return 0;
}

/*
 * Insert vm structure into process list
 * This makes sure the list is sorted by start address, and
 * does some simple overlap checking.
 * JSGF
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct **nxtpp, *mpnt;

	nxtpp = &t->mmap;
	
	for(mpnt = t->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
	{
		if (mpnt->vm_start > vmp->vm_start)
			break;
		nxtpp = &mpnt->vm_next;

		if ((vmp->vm_start >= mpnt->vm_start &&
		     vmp->vm_start < mpnt->vm_end) ||
		    (vmp->vm_end >= mpnt->vm_start &&
		     vmp->vm_end < mpnt->vm_end))
			printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
			       vmp->vm_start, vmp->vm_end,
			       mpnt->vm_start, mpnt->vm_end);
	}
	
	vmp->vm_next = mpnt;

	*nxtpp = vmp;
}

/*
 * Merge a list of memory segments if possible.
 * Redundant vm_area_structs are freed.
 * This assumes that the list is ordered by address.
 */
void merge_segments(struct vm_area_struct *mpnt,
		    map_mergep_fnp mergep, void *mpd)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;
	
	for(prev = mpnt, mpnt = mpnt->vm_next;
	    mpnt != NULL;
	    prev = mpnt, mpnt = next)
	{
		int mp;

		next = mpnt->vm_next;
		
		if (mergep == NULL)
		{
			unsigned long psz = prev->vm_end - prev->vm_start;
			mp = prev->vm_offset + psz == mpnt->vm_offset;
		}
		else
			mp = (*mergep)(prev, mpnt, mpd);

		/*
		 * Check they are compatible.
		 * and the like...
		 * What does the share pointer mean?
		 */
		if (prev->vm_ops != mpnt->vm_ops ||
		    prev->vm_page_prot != mpnt->vm_page_prot ||
		    prev->vm_inode != mpnt->vm_inode ||
		    prev->vm_end != mpnt->vm_start ||
		    !mp ||
		    prev->vm_share != mpnt->vm_share ||		/* ?? */
		    prev->vm_next != mpnt)			/* !!! */
			continue;

		/*
		 * merge prev with mpnt and set up pointers so the new
		 * big segment can possibly merge with the next one.
		 * The old unused mpnt is freed.
		 */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
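
/*
 * A worked example of the default merge test above (numbers are only
 * illustrative):
 *
 *	prev:  0x10000-0x14000, vm_offset 0x0000
 *	mpnt:  0x14000-0x18000, vm_offset 0x4000
 *
 * psz is 0x4000, so prev->vm_offset + psz == mpnt->vm_offset, and since
 * prev->vm_end == mpnt->vm_start the two areas are contiguous both in
 * the address space and in the backing object; provided vm_ops,
 * protection, inode and vm_share also match, they collapse into a
 * single area 0x10000-0x18000.
 */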

/*
 * Map memory not associated with any file into a process
 * address space.  Adjacent memory is merged.
 */
static int anon_map(struct inode *ino, struct file * file,
		    unsigned long addr, size_t len, int mask,
		    unsigned long off)
{
  	struct vm_area_struct * mpnt;

	if (zeromap_page_range(addr, len, mask))
		return -ENOMEM;

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = mask;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = NULL;
	mpnt->vm_offset = 0;
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, ignoff_mergep, NULL);

	return 0;
}

/* Merge, ignoring offsets */
int ignoff_mergep(const struct vm_area_struct *m1,
		  const struct vm_area_struct *m2,
		  void *data)
{
	if (m1->vm_inode != m2->vm_inode)	/* Just to be sure */
		return 0;

	return (struct inode *)data == m1->vm_inode;
}
