亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频

? 歡迎來到蟲蟲下載站! | ?? 資源下載 ?? 資源專輯 ?? 關于我們
? 蟲蟲下載站

?? xfs_buf.c

?? 優龍2410linux2.6.8內核源代碼
?? C
?? 第 1 頁 / 共 3 頁
字號:
/* * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Further, this software is distributed without any warranty that it is * free of the rightful claim of any third person regarding infringement * or the like.  Any license provided herein, whether implied or * otherwise, applies only to this software file.  Patent licenses, if * any, provided herein do not apply to combinations of this program with * other software, or any other product whatsoever. * * You should have received a copy of the GNU General Public License along * with this program; if not, write the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, * Mountain View, CA  94043, or: * * http://www.sgi.com * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ *//* *	The xfs_buf.c code provides an abstract buffer cache model on top *	of the Linux page cache.  Cached metadata blocks for a file system *	are hashed to the inode for the block device.  xfs_buf.c assembles *	buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O. * *      Written by Steve Lord, Jim Mostek, Russell Cattelan *		    and Rajagopal Ananthanarayanan ("ananth") at SGI. 
 * */

#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/percpu.h>

#include "xfs_linux.h"

/* Fallback for kernels that do not define a readahead gfp mask. */
#ifndef GFP_READAHEAD
#define GFP_READAHEAD	(__GFP_NOWARN|__GFP_NORETRY)
#endif

/*
 * File wide globals
 */

STATIC kmem_cache_t *pagebuf_cache;
STATIC kmem_shaker_t pagebuf_shake;
STATIC int pagebuf_daemon_wakeup(int, unsigned int);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *pagebuf_logio_workqueue;
STATIC struct workqueue_struct *pagebuf_dataio_workqueue;

/*
 * Pagebuf debugging
 */

#ifdef PAGEBUF_TRACE
/*
 * Record one trace event for a pagebuf: flags, hold count, lock count,
 * current task, caller-supplied data/return address, and the buffer's
 * 64-bit file offset split into two 32-bit trace slots.
 */
void
pagebuf_trace(
	xfs_buf_t	*pb,
	char		*id,
	void		*data,
	void		*ra)
{
	ktrace_enter(pagebuf_trace_buf,
		pb, id,
		(void *)(unsigned long)pb->pb_flags,
		(void *)(unsigned long)pb->pb_hold.counter,
		(void *)(unsigned long)pb->pb_sema.count.counter,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
		(void *)(unsigned long)pb->pb_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *pagebuf_trace_buf;
#define PAGEBUF_TRACE_SIZE	4096
#define PB_TRACE(pb, id, data)	\
	pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define PB_TRACE(pb, id, data)	do { } while (0)
#endif

/* Lock-ownership tracking compiles away unless PAGEBUF_LOCK_TRACKING is set. */
#ifdef PAGEBUF_LOCK_TRACKING
# define PB_SET_OWNER(pb)	((pb)->pb_last_holder = current->pid)
# define PB_CLEAR_OWNER(pb)	((pb)->pb_last_holder = -1)
# define PB_GET_OWNER(pb)	((pb)->pb_last_holder)
#else
# define PB_SET_OWNER(pb)	do { } while (0)
# define PB_CLEAR_OWNER(pb)	do { } while (0)
# define PB_GET_OWNER(pb)	do { } while (0)
#endif

/*
 * Pagebuf allocation / freeing.
 */

/* Map pagebuf flags to the page-allocator gfp mask for page lookups. */
#define pb_to_gfp(flags) \
	(((flags) & PBF_READ_AHEAD) ? GFP_READAHEAD : \
	 ((flags) & PBF_DONT_BLOCK) ? \
GFP_NOFS : GFP_KERNEL)

/* Map pagebuf flags to the kmem allocation mode. */
#define pb_to_km(flags) \
	 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define pagebuf_allocate(flags) \
	kmem_zone_alloc(pagebuf_cache, pb_to_km(flags))
#define pagebuf_deallocate(pb) \
	kmem_zone_free(pagebuf_cache, (pb));

/*
 * Pagebuf hashing
 */

#define NBITS	8
#define NHASH	(1<<NBITS)

typedef struct {
	struct list_head	pb_hash;
	spinlock_t		pb_hash_lock;
} pb_hash_t;

STATIC pb_hash_t	pbhash[NHASH];
#define pb_hash(pb)	&pbhash[pb->pb_hash_index]

/*
 * Hash a (block device, byte offset) pair to a bucket index in pbhash.
 * The offset is reduced to 512-byte sector units and folded NBITS at a
 * time; the device pointer (scaled by cacheline size) is mixed in so
 * different devices spread across buckets.
 */
STATIC int
_bhash(
	struct block_device *bdev,
	loff_t		base)
{
	int		bit, hval;

	base >>= 9;
	base ^= (unsigned long)bdev / L1_CACHE_BYTES;
	for (bit = hval = 0; base && bit < sizeof(base) * 8; bit += NBITS) {
		hval ^= (int)base & (NHASH-1);
		base >>= NBITS;
	}
	return hval;
}

/*
 * Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC spinlock_t	as_lock = SPIN_LOCK_UNLOCKED;

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

	/* GFP_ATOMIC: may be called where sleeping is not allowed. */
	aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC);
	if (aentry) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		/* No memory to defer the unmap - do it synchronously. */
		vunmap(addr);
	}
}

/*
 * Drain the deferred-vunmap list: detach the whole list under the lock,
 * then unmap and free the entries outside it (vunmap may sleep).
 */
STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	/* Unlocked peek; a racing add is simply picked up next time. */
	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

/*
 *	Internal pagebuf object manipulation
 */

/*
 * Initialize a (possibly recycled) xfs_buf_t for the byte range
 * [range_base, range_base + range_length) on the given target.
 * The buffer is returned held (hold count 1) and locked.
 */
STATIC void
_pagebuf_initialize(
	xfs_buf_t		*pb,
	xfs_buftarg_t		*target,
	loff_t			range_base,
	size_t			range_length,
	page_buf_flags_t	flags)
{
	/*
	 * We don't want certain flags to appear in pb->pb_flags.
	 */
	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);

	memset(pb, 0, sizeof(xfs_buf_t));
	atomic_set(&pb->pb_hold, 1);
	init_MUTEX_LOCKED(&pb->pb_iodonesema);
	INIT_LIST_HEAD(&pb->pb_list);
	INIT_LIST_HEAD(&pb->pb_hash_list);
	init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
	PB_SET_OWNER(pb);
	pb->pb_target = target;
	pb->pb_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	pb->pb_buffer_length = pb->pb_count_desired = range_length;
	/* PBF_NONE: no pages are known to be up to date yet. */
	pb->pb_flags = flags | PBF_NONE;
	pb->pb_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&pb->pb_pin_count, 0);
	init_waitqueue_head(&pb->pb_waiters);

	XFS_STATS_INC(pb_create);
	PB_TRACE(pb, "initialize", target);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_pagebuf_get_pages(
	xfs_buf_t		*pb,
	int			page_count,
	page_buf_flags_t	flags)
{
	/* Make sure that we have a page list */
	if (pb->pb_pages == NULL) {
		pb->pb_offset = page_buf_poff(pb->pb_file_offset);
		pb->pb_page_count = page_count;
		/* Small buffers use the inline array; large ones allocate. */
		if (page_count <= PB_PAGES) {
			pb->pb_pages = pb->pb_page_array;
		} else {
			pb->pb_pages = kmem_alloc(sizeof(struct page *) *
					page_count, pb_to_km(flags));
			if (pb->pb_pages == NULL)
				return -ENOMEM;
		}
		memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees pb_pages if it was malloced.
 *	(No-op when the inline pb_page_array was used.)
 */
STATIC void
_pagebuf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->pb_pages != bp->pb_page_array) {
		kmem_free(bp->pb_pages,
			  bp->pb_page_count * sizeof(struct page *));
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use pagebuf_rele instead for
 * 	hashed and refcounted buffers
 */
void
pagebuf_free(
	xfs_buf_t		*bp)
{
	PB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->pb_hash_list));

	if (bp->pb_flags & _PBF_PAGE_CACHE) {
		uint		i;

		/* Multi-page mapped buffers queue their vmap for batching. */
		if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
			free_address(bp->pb_addr - bp->pb_offset);

		/* Drop the page-cache reference taken at lookup time. */
		for (i = 0; i < bp->pb_page_count; i++)
			page_cache_release(bp->pb_pages[i]);
		_pagebuf_free_pages(bp);
	} else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
		 /*
		  * XXX(hch): bp->pb_count_desired might be incorrect (see
		  * pagebuf_associate_memory for details), but fortunately
		  * the Linux version of kmem_free ignores the len argument..
		  */
		kmem_free(bp->pb_addr, bp->pb_count_desired);
		_pagebuf_free_pages(bp);
	}

	pagebuf_deallocate(bp);
}

/*
 *	Finds all pages for buffer in question and builds its page list.
 */
STATIC int
_pagebuf_lookup_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	struct address_space	*mapping = bp->pb_target->pbr_mapping;
	unsigned int		sectorshift = bp->pb_target->pbr_sshift;
	size_t			blocksize = bp->pb_target->pbr_bsize;
	size_t			size = bp->pb_count_desired;
	size_t			nbytes, offset;
	int			gfp_mask = pb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	loff_t			end;
	int			error;

	end = bp->pb_file_offset + bp->pb_buffer_length;
	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);

	error = _pagebuf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->pb_flags |= _PBF_PAGE_CACHE;

	offset = bp->pb_offset;
	first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->pb_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	      retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & PBF_READ_AHEAD) {
				/* Readahead is best-effort: give back the
				 * pages found so far and bail out. */
				bp->pb_page_count = i;
				for (i = 0; i < bp->pb_page_count; i++)
					unlock_page(bp->pb_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"possible deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			/* Kick the pagebuf daemon to flush delwri buffers,
			 * then back off briefly and retry the allocation. */
			XFS_STATS_INC(pb_page_retries);
			pagebuf_daemon_wakeup(0, gfp_mask);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(10);
			goto retry;
		}

		XFS_STATS_INC(pb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		/* page_count ends up as the number of pages whose covered
		 * range is fully up to date. */
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize == PAGE_CACHE_SIZE) {
				/* Keep pages locked across the read so the
				 * I/O completion can mark them uptodate. */
				if (flags & PBF_READ)
					bp->pb_locked = 1;
			} else if (!PagePrivate(page)) {
				unsigned long	j, range;

				/*
				 * In this case page->private holds a bitmap
				 * of uptodate sectors within the page
				 */
				ASSERT(blocksize < PAGE_CACHE_SIZE);
				range = (offset + nbytes) >> sectorshift;
				for (j = offset >> sectorshift; j < range; j++)
					if (!test_bit(j, &page->private))
						break;
				if (j == range)
					page_count++;
			}
		}

		bp->pb_pages[i] = page;
		offset = 0;
	}

	if (!bp->pb_locked) {
		for (i = 0; i < bp->pb_page_count; i++)
			unlock_page(bp->pb_pages[i]);
	}

	if (page_count) {
		/* if we have any uptodate pages, mark that in the buffer */
		bp->pb_flags &= ~PBF_NONE;

		/* if some pages aren't uptodate, mark that in the buffer */
		if (page_count != bp->pb_page_count)
			bp->pb_flags |= PBF_PARTIAL;
	}

	PB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_pagebuf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
	if (bp->pb_page_count == 1) {
		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
		bp->pb_flags |= PBF_MAPPED;
	} else if (flags & PBF_MAPPED) {
		/* Opportunistically drain batched vunmaps before adding
		 * another vmap mapping. */
		if (as_list_len > 64)
			purge_addresses();
		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
				VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->pb_addr == NULL))
			return -ENOMEM;
		bp->pb_addr += bp->pb_offset;
		bp->pb_flags |= PBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	_pagebuf_find
 *
 *	Looks up, and creates if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.	 If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
STATIC xfs_buf_t *
_pagebuf_find(				/* find buffer for block	*/
	xfs_buftarg_t		*target,/* target for block		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags,	/* PBF_TRYLOCK			*/
	xfs_buf_t		*new_pb)/* newly allocated buffer	*/
{
	loff_t			range_base;
	size_t			range_length;
	int			hval;
	pb_hash_t		*h;
	xfs_buf_t		*pb, *n;
	int			not_locked;

	/* ioff/isize are in 512-byte basic blocks; convert to bytes. */
	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Ensure we never do IOs smaller than the sector size */
	BUG_ON(range_length < (1 << target->pbr_sshift));

	/* Ensure we never do IOs that are not sector aligned */
	BUG_ON(range_base & (loff_t)target->pbr_smask);

	hval = _bhash(target->pbr_bdev, range_base);
	h = &pbhash[hval];

	spin_lock(&h->pb_hash_lock);
	list_for_each_entry_safe(pb, n, &h->pb_hash, pb_hash_list) {
		if (pb->pb_target == target &&
		    pb->pb_file_offset == range_base &&
		    pb->pb_buffer_length == range_length) {
			/* If we look at something bring it to the
			 * front of the list for next time
			 */
			atomic_inc(&pb->pb_hold);
			list_move(&pb->pb_hash_list, &h->pb_hash);
			goto found;
		}
	}

	/* No match found */
	if (new_pb) {
		/* Insert the caller's preallocated buffer into the hash;
		 * it is returned still locked from _pagebuf_initialize. */
		_pagebuf_initialize(new_pb, target, range_base,
				range_length, flags);
		new_pb->pb_hash_index = hval;
		list_add(&new_pb->pb_hash_list, &h->pb_hash);
	} else {
		XFS_STATS_INC(pb_miss_locked);
	}

	spin_unlock(&h->pb_hash_lock);
	return (new_pb);

found:
	spin_unlock(&h->pb_hash_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	not_locked = down_trylock(&pb->pb_sema);
	if (not_locked) {
		if (!(flags & PBF_TRYLOCK)) {
			/* wait for buffer ownership */
			PB_TRACE(pb, "get_lock", 0);
			pagebuf_lock(pb);
			XFS_STATS_INC(pb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this pagebuf at least overlaps our
			 * pagebuf and is locked, therefore our buffer
			 * either does not exist, or is this buffer
			 */
			pagebuf_rele(pb);
			XFS_STATS_INC(pb_busy_locked);
			return (NULL);
		}
	} else {
		/* trylock worked */
		PB_SET_OWNER(pb);
	}

	/* NOTE(review): for a stale buffer this keeps only PBF_MAPPED and
	 * clears every other flag (including PBF_STALE) - looks intentional
	 * as a reset of recycled buffers, but confirm against PBF_STALE
	 * handling elsewhere in the file. */
	if (pb->pb_flags & PBF_STALE)
		pb->pb_flags &= PBF_MAPPED;
	PB_TRACE(pb, "got_lock", 0);
	XFS_STATS_INC(pb_get_locked);
	return (pb);
}

/*
 *	pagebuf_find
 *
 *	pagebuf_find returns a buffer matching the specified range of
 *	data for the specified target, if any of the relevant blocks
 *	are in memory.  The buffer may have unallocated holes, if
 *	some, but not all, of the blocks are in memory.  Even where
 *	pages are present in the buffer, not all of every page may be
 *	valid.
 */
xfs_buf_t *
pagebuf_find(				/* find buffer for block	*/
					/* if the block is in memory	*/
	xfs_buftarg_t		*target,/* target for block		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
{
	/* Lookup-only: passing a NULL new_pb means no buffer is created
	 * on a miss. */
	return _pagebuf_find(target, ioff, isize, flags, NULL);
}

/*
 *	pagebuf_get
 *
 *	pagebuf_get assembles a buffer covering the specified range.
 *	Some or all of the blocks in the range may be valid.  Storage
 *	in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.  If PBF_READ is set in
 *	flags, pagebuf_iostart is called also.
 */
xfs_buf_t *
pagebuf_get(				/* allocate a buffer		*/

?? 快捷鍵說明

復制代碼 Ctrl + C
搜索代碼 Ctrl + F
全屏模式 F11
切換主題 Ctrl + Shift + D
顯示快捷鍵 ?
增大字號 Ctrl + =
減小字號 Ctrl + -
亚洲欧美第一页_禁久久精品乱码_粉嫩av一区二区三区免费野_久草精品视频
欧美一区二区啪啪| 风间由美性色一区二区三区| 一本色道久久综合亚洲91 | 国模套图日韩精品一区二区| 日韩精品一区二区三区蜜臀| 国内精品视频666| 国产日韩欧美一区二区三区综合| 国产福利精品一区二区| 国产精品免费视频观看| 在线视频国内一区二区| 五月综合激情网| 久久婷婷综合激情| 波多野结衣中文字幕一区二区三区| 综合av第一页| 欧美日韩国产综合一区二区| 日本va欧美va精品发布| 久久久久免费观看| 色就色 综合激情| 视频一区二区欧美| 久久久99精品免费观看不卡| 成人av集中营| 日韩av网站免费在线| 久久久久久久久久久久久久久99 | 国产精品久久二区二区| 91国产成人在线| 韩国v欧美v日本v亚洲v| 日韩美女精品在线| 精品久久久久久亚洲综合网| 99re热这里只有精品视频| 亚洲午夜私人影院| 国产日韩成人精品| 欧美日韩你懂得| av一区二区三区| 全国精品久久少妇| 国产精品欧美精品| 欧美一区二区三区在| 成人18精品视频| 日韩精品视频网| 精品国产免费视频| 色噜噜狠狠色综合中国| 日韩中文字幕av电影| 久久久久国产一区二区三区四区| av日韩在线网站| 亚洲超碰97人人做人人爱| 久久久久久久久久看片| 色偷偷成人一区二区三区91| 免费亚洲电影在线| 国产精品成人一区二区三区夜夜夜| 久久99精品久久久久久动态图| 久久久美女艺术照精彩视频福利播放| 麻豆精品一区二区三区| 亚洲日穴在线视频| 日韩限制级电影在线观看| 国产盗摄一区二区| 午夜影院久久久| 久久久99精品免费观看不卡| 欧美性高清videossexo| 国产成人在线看| 日韩精品一二区| 综合激情成人伊人| 欧美精品一区二区高清在线观看| 色婷婷精品大在线视频| 国产成人免费视频精品含羞草妖精| 一区二区三区在线影院| 久久精品夜色噜噜亚洲aⅴ| 欧美精品日韩精品| www.av精品| 韩国av一区二区三区四区| 亚洲午夜精品久久久久久久久| 国产目拍亚洲精品99久久精品| 欧美丰满一区二区免费视频| 不卡的电影网站| 国产自产2019最新不卡| 亚洲超碰精品一区二区| 亚洲女人的天堂| 国产婷婷色一区二区三区| 欧美一区二区三区视频| 欧美午夜免费电影| 91日韩精品一区| 国产一区激情在线| 日本在线不卡一区| 亚洲国产日韩av| 亚洲影视资源网| 亚洲欧美另类久久久精品| 欧美激情一区二区三区不卡| 精品三级在线观看| 日韩精品一区在线| 91精品国产综合久久久久久| 欧美性videosxxxxx| 色猫猫国产区一区二在线视频| 风间由美一区二区av101| 国产真实乱偷精品视频免| 久久精品国产精品亚洲红杏| 秋霞午夜鲁丝一区二区老狼| 午夜精品一区二区三区免费视频 | 96av麻豆蜜桃一区二区| 国产凹凸在线观看一区二区| 国产一区免费电影| 国产剧情一区二区| 国产精品亚洲午夜一区二区三区| 国产一区二区看久久| 韩国成人在线视频| 免播放器亚洲一区| 精品在线免费观看| 国产伦精品一区二区三区视频青涩 | 国产一区二区精品在线观看| 国产成人福利片| www.欧美日韩| 色综合网站在线| 91视频.com| 国产成人鲁色资源国产91色综| 成人免费黄色大片| 色老综合老女人久久久| 欧美男同性恋视频网站| 91精品综合久久久久久| 精品国产乱码久久久久久夜甘婷婷| 欧美一区二区三区白人| 国产婷婷一区二区| 亚洲欧美电影院| 性感美女极品91精品| 丝袜国产日韩另类美女| 精品一区二区国语对白| 97精品久久久久中文字幕 | 亚洲免费成人av| 午夜欧美大尺度福利影院在线看| 另类小说综合欧美亚洲| 国产99久久久精品| 欧美在线观看禁18| 久久精品视频一区二区三区| 亚洲色图制服诱惑| 亚洲成人激情综合网| 国产乱码精品一区二区三区av| 91在线看国产| 欧美电影免费提供在线观看| 亚洲视频在线一区观看| 舔着乳尖日韩一区| 国产 欧美在线| 在线观看成人小视频| 26uuu成人网一区二区三区| 亚洲精品午夜久久久| 国产一区二区三区在线看麻豆| 99久久99久久精品免费看蜜桃| 56国语精品自产拍在线观看| 国产欧美精品一区二区色综合| 一区二区三区免费网站| 国模少妇一区二区三区| 欧美又粗又大又爽| 中文字幕乱码日本亚洲一区二区| 亚洲观看高清完整版在线观看| 国产露脸91国语对白| 欧美日韩国产美| 日本一区二区久久| 蜜臀精品一区二区三区在线观看| av午夜一区麻豆| 日韩一二在线观看| 亚洲一卡二卡三卡四卡无卡久久| 国产成人av影院| 日韩欧美中文字幕一区| 亚洲一区二区三区四区在线免费观看| 国产精品亚洲一区二区三区在线| 在线成人免费观看| 
夜夜精品浪潮av一区二区三区| 看电视剧不卡顿的网站| 337p亚洲精品色噜噜噜| 亚洲永久免费av| 一本到三区不卡视频| 欧美激情在线观看视频免费| 久久精品二区亚洲w码| 欧美日韩一区二区不卡| 亚洲乱码中文字幕| 一本色道久久综合精品竹菊| 久久综合九色综合97婷婷女人| 久久草av在线| 日韩精品一区二区在线| 日韩vs国产vs欧美| 在线精品视频一区二区三四| 国产精品视频第一区| 国产91色综合久久免费分享| 欧美精品一区二区久久久| 精品一区二区在线播放| 日韩一区二区三区视频在线观看| 五月激情综合网| 欧美日韩国产综合视频在线观看 | 日日夜夜精品免费视频| 欧美亚洲图片小说| 一区二区免费在线播放| 成人av在线看| 亚洲视频免费看| 91伊人久久大香线蕉| 亚洲人精品一区| 欧美三级日韩在线| 国产精品三级久久久久三级| 日本道色综合久久| 一区二区三区丝袜| 欧美精品免费视频| 麻豆精品久久久| 国产欧美日韩视频一区二区| 国产99久久久国产精品 | 中文字幕欧美一区| 91麻豆免费看片|