
blkmtd.c

K9G808U0A NAND flash driver for the PXA270; the PXA270 has no native NAND flash interface.
Language: C
Page 1 of 2
/*
 * $Id: blkmtd.c,v 1.2 2007/09/21 03:09:24 quy Exp $
 *
 * blkmtd.c - use a block device as a fake MTD
 *
 * Author: Simon Evans <spse@secret.org.uk>
 *
 * Copyright (C) 2001,2002 Simon Evans
 *
 * Licence: GPL
 *
 * How it works:
 *      The driver uses raw i/o to read/write the device and the page
 *      cache to cache access. Writes update the page cache with the
 *      new data, mark it dirty and add the page into a kiobuf.
 *      When the kiobuf becomes full or the next entry is to an earlier
 *      block in the kiobuf then it is flushed to disk. This allows
 *      writes to remain ordered and gives a small and simple outgoing
 *      write cache.
 *
 *      It can be loaded Read-Only to prevent erases and writes to the
 *      medium.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/iobuf.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>

#ifdef CONFIG_MTD_DEBUG
#ifdef CONFIG_PROC_FS
#  include <linux/proc_fs.h>
#  define BLKMTD_PROC_DEBUG
   static struct proc_dir_entry *blkmtd_proc;
#endif
#endif

#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)

/* Default erase size in KiB, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10)        /* 128KiB */
#define VERSION "1.10"

/* Info for the block device */
struct blkmtd_dev {
        struct list_head list;
        struct block_device *binding;
        struct mtd_info mtd_info;
        struct kiobuf *rd_buf, *wr_buf;
        long iobuf_locks;
        struct semaphore wrbuf_mutex;
};

/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

static void blkmtd_sync(struct mtd_info *mtd);

#define MAX_DEVICES 4

/* Module parameters passed by insmod/modprobe */
char *device[MAX_DEVICES];    /* the block device to use */
int erasesz[MAX_DEVICES];     /* optional default erase size */
int ro[MAX_DEVICES];          /* optional read only flag */
int sync;

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
MODULE_PARM(device, "1-4s");
MODULE_PARM_DESC(device, "block device to use");
MODULE_PARM(erasesz, "1-4i");
MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
MODULE_PARM(ro, "1-4i");
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
MODULE_PARM(sync, "i");
MODULE_PARM_DESC(sync, "1=Synchronous writes");
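/*
 * Illustrative example (added; not part of the original file): with the
 * parameters declared above, a 2.4-era load might look like
 *
 *      insmod blkmtd.o device=/dev/loop0 erasesz=128 ro=0 sync=0
 *
 * where /dev/loop0 is only a placeholder block device and erasesz is
 * given in KiB.
 */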
/**
 * read_pages - read in pages via the page cache
 * @dev: device to read from
 * @pagenrs: list of page numbers wanted
 * @pagelst: storage for struct page * pointers
 * @pages: count of pages wanted
 *
 * Read pages, getting them from the page cache if available
 * else reading them in from disk if not. pagelst must be preallocated
 * to hold the page count.
 */
static int read_pages(struct blkmtd_dev *dev, int pagenrs[], struct page **pagelst, int pages)
{
        kdev_t kdev;
        struct page *page;
        int cnt = 0;
        struct kiobuf *iobuf;
        int err = 0;

        if(!dev) {
                err("read_pages: PANIC dev == NULL");
                return -EIO;
        }
        kdev = to_kdev_t(dev->binding->bd_dev);

        DEBUG(2, "read_pages: reading %d pages\n", pages);
        if(test_and_set_bit(0, &dev->iobuf_locks)) {
                err = alloc_kiovec(1, &iobuf);
                if (err) {
                        crit("cant allocate kiobuf");
                        return -ENOMEM;
                }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                iobuf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
                if(iobuf->blocks == NULL) {
                        crit("cant allocate iobuf blocks");
                        free_kiovec(1, &iobuf);
                        return -ENOMEM;
                }
#endif
        } else {
                iobuf = dev->rd_buf;
        }

        iobuf->nr_pages = 0;
        iobuf->length = 0;
        iobuf->offset = 0;
        iobuf->locked = 1;

        for(cnt = 0; cnt < pages; cnt++) {
                page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenrs[cnt]);
                pagelst[cnt] = page;
                if(!Page_Uptodate(page)) {
                        iobuf->blocks[iobuf->nr_pages] = pagenrs[cnt];
                        iobuf->maplist[iobuf->nr_pages++] = page;
                }
        }

        if(iobuf->nr_pages) {
                iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
                err = brw_kiovec(READ, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
                DEBUG(3, "blkmtd: read_pages: finished, err = %d\n", err);
                if(err < 0) {
                        while(pages--) {
                                ClearPageUptodate(pagelst[pages]);
                                unlock_page(pagelst[pages]);
                                page_cache_release(pagelst[pages]);
                        }
                } else {
                        while(iobuf->nr_pages--) {
                                SetPageUptodate(iobuf->maplist[iobuf->nr_pages]);
                        }
                        err = 0;
                }
        }

        if(iobuf != dev->rd_buf) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                kfree(iobuf->blocks);
#endif
                free_kiovec(1, &iobuf);
        } else {
                clear_bit(0, &dev->iobuf_locks);
        }
        DEBUG(2, "read_pages: done, err = %d\n", err);
        return err;
}

/**
 * commit_pages - commit pages in the writeout kiobuf to disk
 * @dev: device to write to
 *
 * If the current dev has pages in the dev->wr_buf kiobuf,
 * they are written to disk using brw_kiovec().
 */
static int commit_pages(struct blkmtd_dev *dev)
{
        struct kiobuf *iobuf = dev->wr_buf;
        kdev_t kdev = to_kdev_t(dev->binding->bd_dev);
        int err = 0;

        iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
        iobuf->locked = 1;
        if(iobuf->length) {
                int i;
                DEBUG(2, "blkmtd: commit_pages: nrpages = %d\n", iobuf->nr_pages);
                /* Check all the pages are dirty and lock them */
                for(i = 0; i < iobuf->nr_pages; i++) {
                        struct page *page = iobuf->maplist[i];
                        BUG_ON(!PageDirty(page));
                        lock_page(page);
                }
                err = brw_kiovec(WRITE, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
                DEBUG(3, "commit_write: committed %d pages err = %d\n", iobuf->nr_pages, err);
                while(iobuf->nr_pages) {
                        struct page *page = iobuf->maplist[--iobuf->nr_pages];
                        ClearPageDirty(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        DEBUG(2, "blkmtd: sync: end, err = %d\n", err);
        iobuf->offset = 0;
        iobuf->nr_pages = 0;
        iobuf->length = 0;
        return err;
}
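/*
 * Illustrative note (added; not part of the original file): commit_pages()
 * flushes the whole wr_buf in one brw_kiovec() call of
 * nr_pages << PAGE_SHIFT bytes, so e.g. 16 accumulated dirty pages with
 * 4 KiB pages go to the block device as a single 64 KiB write.
 */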
/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount of data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * Non page aligned start and end result in a read-in of the page and
 * part of the page being modified. Pages are added to the wr_buf kiobuf
 * until this becomes full or the next page written to has a lower pagenr
 * than the current max pagenr in the kiobuf.
 */
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
                       size_t len, int *retlen)
{
        int pagenr, offset;
        size_t start_len = 0, end_len;
        int pagecnt = 0;
        struct kiobuf *iobuf = dev->wr_buf;
        int err = 0;
        struct page *pagelst[2];
        int pagenrs[2];
        int readpages = 0;
        int ignorepage = -1;

        pagenr = to >> PAGE_SHIFT;
        offset = to & ~PAGE_MASK;

        DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
              buf, (long)to, len, pagenr, offset);

        *retlen = 0;
        /* see if we have to do a partial write at the start */
        if(offset) {
                start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
                len -= start_len;
        }

        /* calculate the length of the other two regions */
        end_len = len & ~PAGE_MASK;
        len -= end_len;

        if(start_len) {
                pagenrs[0] = pagenr;
                readpages++;
                pagecnt++;
        }

        if(len)
                pagecnt += len >> PAGE_SHIFT;

        if(end_len) {
                pagenrs[readpages] = pagenr + pagecnt;
                readpages++;
                pagecnt++;
        }

        DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
              start_len, len, end_len, pagecnt);

        down(&dev->wrbuf_mutex);

        if(iobuf->nr_pages && ((pagenr <= iobuf->blocks[iobuf->nr_pages-1])
                               || (iobuf->nr_pages + pagecnt) >= KIO_STATIC_PAGES)) {
                if((pagenr == iobuf->blocks[iobuf->nr_pages-1])
                   && ((iobuf->nr_pages + pagecnt) < KIO_STATIC_PAGES)) {
                        iobuf->nr_pages--;
                        ignorepage = pagenr;
                } else {
                        DEBUG(3, "blkmtd: doing writeout pagenr = %d max_pagenr = %ld pagecnt = %d idx = %d\n",
                              pagenr, iobuf->blocks[iobuf->nr_pages-1],
                              pagecnt, iobuf->nr_pages);
                        commit_pages(dev);
                }
        }

        if(readpages) {
                err = read_pages(dev, pagenrs, pagelst, readpages);
                if(err < 0)
                        goto readin_err;
        }

        if(start_len) {
                /* do partial start region */
                struct page *page;

                DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
                      pagenr, start_len, offset);
                page = pagelst[0];
                BUG_ON(!buf);
                if(PageDirty(page) && pagenr != ignorepage) {
                        err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d ignorepage = %d\n",
                            to, start_len, len, end_len, pagenr, ignorepage);
                        BUG();
                }
                memcpy(page_address(page)+offset, buf, start_len);
                SetPageDirty(page);
                SetPageUptodate(page);
                unlock_page(page);
                buf += start_len;
                *retlen = start_len;
                err = 0;
                iobuf->blocks[iobuf->nr_pages] = pagenr++;
                iobuf->maplist[iobuf->nr_pages] = page;
                iobuf->nr_pages++;
        }

        /* Now do the main loop to a page aligned, n page sized output */
        if(len) {
                int pagesc = len >> PAGE_SHIFT;
                DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
                      pagenr, pagesc);
                while(pagesc) {
                        struct page *page;

                        /* see if page is in the page cache */
                        DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
                        page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenr);
                        if(!page) {
                                warn("write: cant grab cache page %d", pagenr);
                                err = -ENOMEM;
                                goto write_err;
                        }
                        if(PageDirty(page) && pagenr != ignorepage) {
                                BUG();
                        }
                        if(!buf) {
                                memset(page_address(page), 0xff, PAGE_SIZE);
                        } else {
                                memcpy(page_address(page), buf, PAGE_SIZE);
                                buf += PAGE_SIZE;
                        }
                        iobuf->blocks[iobuf->nr_pages] = pagenr++;
                        iobuf->maplist[iobuf->nr_pages] = page;
                        iobuf->nr_pages++;
                        SetPageDirty(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        pagesc--;
                        *retlen += PAGE_SIZE;
                }
        }

        if(end_len) {
                /* do the third region */
                struct page *page;

                DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
                      pagenr, end_len);
                page = pagelst[readpages-1];
                BUG_ON(!buf);
                if(PageDirty(page) && pagenr != ignorepage) {
                        err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d ignorepage = %d\n",
                            to, start_len, len, end_len, pagenr, ignorepage);
                        BUG();
                }
                memcpy(page_address(page), buf, end_len);
                SetPageDirty(page);
                SetPageUptodate(page);
                unlock_page(page);
                DEBUG(3, "blkmtd: write: writing out partial end\n");
                *retlen += end_len;
                err = 0;
                iobuf->blocks[iobuf->nr_pages] = pagenr;
                iobuf->maplist[iobuf->nr_pages] = page;
                iobuf->nr_pages++;
        }

        DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);

        if(sync) {
write_err:
                commit_pages(dev);
        }

readin_err:
        up(&dev->wrbuf_mutex);
        return err;
}
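/*
 * Illustrative note (added; not part of the original file): with the rules
 * above, writes that land on pages 10, 11, 12 in order simply accumulate in
 * wr_buf; a following write that starts in page 12 reuses that last slot
 * (ignorepage), while one that starts at or before page 11, or that would
 * exceed KIO_STATIC_PAGES, triggers commit_pages() first.
 */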
/* erase a specified part of the device */
static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct blkmtd_dev *dev = mtd->priv;
        struct mtd_erase_region_info *einfo = mtd->eraseregions;
        int numregions = mtd->numeraseregions;
        size_t from;
        u_long len;
        int err = -EIO;
        size_t retlen;

        /* check readonly */
        if(!dev->wr_buf) {
                err("error: mtd%d trying to erase readonly device %s",
                    mtd->index, mtd->name);
                instr->state = MTD_ERASE_FAILED;
                goto erase_callback;
        }

        instr->state = MTD_ERASING;
        from = instr->addr;
        len = instr->len;

        /* check erase region has valid start and length */
        DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
              bdevname(dev->binding->bd_dev), from, len);
        while(numregions) {
                DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
                      einfo->offset, einfo->erasesize, einfo->numblocks);
                if(from >= einfo->offset
                   && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
                        if(len == einfo->erasesize
                           && ( (from - einfo->offset) % einfo->erasesize == 0))
                                break;
                }
                numregions--;
                einfo++;
        }

        if(!numregions) {
                /* Not a valid erase block */
                err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
                instr->state = MTD_ERASE_FAILED;
                err = -EIO;
        }

        if(instr->state != MTD_ERASE_FAILED) {
                /* do the erase */
                DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
                err = write_pages(dev, NULL, from, len, &retlen);
                if(err < 0) {
                        err("erase failed err = %d", err);
                        instr->state = MTD_ERASE_FAILED;
                } else {
                        instr->state = MTD_ERASE_DONE;
                        err = 0;
                }
        }

        DEBUG(3, "blkmtd: erase: checking callback\n");
 erase_callback:
        mtd_erase_callback(instr);
        DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
        return err;
}
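/*
 * Illustrative note (added; not part of the original file): assuming the
 * default 128 KiB erase size and a single erase region starting at offset 0,
 * a request of addr = 0x20000, len = 0x20000 passes the region check above,
 * while addr = 0x21000 (unaligned) or len = 0x40000 (more than one block) is
 * rejected with MTD_ERASE_FAILED and -EIO.
 */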
/* read a range of the data via the page cache */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, u_char *buf)
{
        struct blkmtd_dev *dev = mtd->priv;
        int err = 0;
        int offset;
        int pagenr, pages;
        struct page **pagelst;
        int *pagenrs;
        int i;

        *retlen = 0;

        DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
              bdevname(dev->binding->bd_dev), from, len, buf);

        pagenr = from >> PAGE_SHIFT;
        offset = from - (pagenr << PAGE_SHIFT);
        pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
        DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
              pagenr, offset, pages);

        pagelst = kmalloc(sizeof(struct page *) * pages, GFP_KERNEL);
        if(!pagelst)
                return -ENOMEM;
        pagenrs = kmalloc(sizeof(int) * pages, GFP_KERNEL);
        if(!pagenrs) {
                kfree(pagelst);
                return -ENOMEM;
        }
        for(i = 0; i < pages; i++)
                pagenrs[i] = pagenr+i;

        err = read_pages(dev, pagenrs, pagelst, pages);
        if(err)
                goto readerr;

        pagenr = 0;
        while(pages) {
                struct page *page;
                int cpylen;

                DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
                page = pagelst[pagenr];

                cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
                if(offset+cpylen > PAGE_SIZE)
                        cpylen = PAGE_SIZE-offset;

                memcpy(buf + *retlen, page_address(page) + offset, cpylen);
                offset = 0;
                len -= cpylen;
                *retlen += cpylen;
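The head/body/tail split that write_pages() performs on an unaligned write can be seen in isolation with the small standalone sketch below. It is not part of the driver; it simply repeats the same arithmetic in user space, assuming a 4 KiB page size and an invented file name split_demo.c.

/* split_demo.c - standalone sketch (not part of blkmtd.c) showing the
 * head/body/tail split used by write_pages(). Assumes 4 KiB pages.
 * Build: cc -o split_demo split_demo.c
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE ((size_t)4096)

int main(void)
{
        size_t to = 0x1234;              /* example device offset */
        size_t len = 10000;              /* example write length */
        size_t pagenr = to / DEMO_PAGE_SIZE;
        size_t offset = to % DEMO_PAGE_SIZE;
        size_t start_len = 0, end_len, body;

        /* partial page at the start if the offset is not page aligned */
        if (offset)
                start_len = (offset + len > DEMO_PAGE_SIZE) ?
                        DEMO_PAGE_SIZE - offset : len;
        body = len - start_len;

        /* whatever does not fill a whole page becomes the tail */
        end_len = body % DEMO_PAGE_SIZE;
        body -= end_len;

        printf("write of %zu bytes at 0x%zx (page %zu, offset %zu):\n",
               len, to, pagenr, offset);
        printf("  head %zu bytes, body %zu whole page(s), tail %zu bytes\n",
               start_len, body / DEMO_PAGE_SIZE, end_len);
        return 0;
}

With to = 0x1234 and len = 10000 the sketch reports a 3532-byte head, one whole page, and a 2372-byte tail, which exercises all three branches of write_pages().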
