ub.c

Linux kernel source code
C
Page 1 of 5
	}
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	struct request_queue *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);
		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	usb_set_intfdata(sc->intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)

/*
 * The request function is our main entry point
 */
static void ub_request_fn(struct request_queue *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	struct ub_request *urq;
	int n_elem;

	if (atomic_read(&sc->poison)) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, DID_NO_CONNECT << 16);
		return 0;
	}

	if (lun->changed && !blk_pc_request(rq)) {
		blkdev_dequeue_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
	}

	if (lun->urq.rq != NULL)
		return -1;
	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	sg_init_table(cmd->sgv, UB_MAX_REQ_SG);

	blkdev_dequeue_request(rq);

	urq = &lun->urq;
	memset(urq, 0, sizeof(struct ub_request));
	urq->rq = rq;

	/*
	 * get scatterlist from block layer
	 */
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	if (n_elem < 0) {
		/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    lun->name, n_elem);
		goto drop;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    lun->name, n_elem);
		goto drop;
	}
	urq->nsg = n_elem;
	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

	if (blk_pc_request(rq)) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0)
		goto drop;

	return 0;

drop:
	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, DID_ERROR << 16);
	return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		cmd->dir = UB_DIR_WRITE;
	else
		cmd->dir = UB_DIR_READ;

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	/*
	 * build the command
	 *
	 * The call to blk_queue_hardsect_size() guarantees that request
	 * is aligned, but it is given in terms of 512 byte units, always.
	 */
	block = rq->sector >> lun->capacity.bshift;
	nblks = rq->nr_sectors >> lun->capacity.bshift;

	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = rq->nr_sectors * 512;
}

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;

	if (rq->data_len == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = rq->data_len;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_lun *lun = cmd->lun;
	struct ub_request *urq = cmd->back;
	struct request *rq;
	unsigned int scsi_status;

	rq = urq->rq;

	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->data_len)
				rq->data_len = 0;
			else
				rq->data_len -= cmd->act_len;
		}
		scsi_status = 0;
	} else {
		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				scsi_status = SAM_STAT_CHECK_CONDITION;
			else
				scsi_status = DID_ERROR << 16;
		} else {
			if (cmd->error == -EIO) {
				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
					return;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
		}
	}

	urq->rq = NULL;

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, scsi_status);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
	int uptodate;

	if (scsi_status == 0) {
		uptodate = 1;
	} else {
		uptodate = 0;
		rq->errors = scsi_status;
	}
	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	end_that_request_last(rq, uptodate);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{
	if (atomic_read(&sc->poison))
		return -ENXIO;

	ub_reset_enter(sc, urq->current_try);

	if (urq->current_try >= 3)
		return -EIO;
	urq->current_try++;

	/* Remove this if anyone complains of flooding. */
	printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
	    "[sense %x %02x %02x] retry %d\n",
	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);

	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
	return ub_submit_scsi(sc, cmd);
#else
	ub_cmdq_add(sc, cmd);
	return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

	/* Fill what we shouldn't be filling, because usb-storage did so. */
	sc->work_urb.actual_length = 0;
	sc->work_urb.error_count = 0;
	sc->work_urb.status = 0;

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	if (!ub_is_completed(&sc->work_done))
		usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
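The command queue in the listing (ub_cmdq_add, ub_cmdq_insert, ub_cmdq_pop) is a hand-rolled intrusive singly-linked FIFO: add appends at the tail, insert pushes at the head so a command can jump ahead of already-queued work, pop takes from the head, and qmax records the high-water mark. The following is a minimal user-space model of that same discipline, not driver code: struct cmd is ub_scsi_cmd stubbed down to a tag plus the intrusive next link.

	/* User-space model of the driver's command FIFO above. */
	#include <assert.h>
	#include <stddef.h>

	struct cmd {
		int tag;
		struct cmd *next;	/* intrusive link, like ub_scsi_cmd.next */
	};

	struct cmd_queue {
		struct cmd *head, *tail;
		int qlen, qmax;		/* current depth and high-water mark */
	};

	static void cmdq_add(struct cmd_queue *t, struct cmd *c)	/* append at tail */
	{
		if (t->qlen++ == 0)
			t->head = t->tail = c;
		else {
			t->tail->next = c;
			t->tail = c;
		}
		if (t->qlen > t->qmax)
			t->qmax = t->qlen;
	}

	static void cmdq_insert(struct cmd_queue *t, struct cmd *c)	/* push at head */
	{
		if (t->qlen++ == 0)
			t->head = t->tail = c;
		else {
			c->next = t->head;
			t->head = c;
		}
		if (t->qlen > t->qmax)
			t->qmax = t->qlen;
	}

	static struct cmd *cmdq_pop(struct cmd_queue *t)	/* take from head */
	{
		struct cmd *c;

		if (t->qlen == 0)
			return NULL;
		if (--t->qlen == 0)
			t->tail = NULL;
		c = t->head;
		t->head = c->next;
		c->next = NULL;
		return c;
	}

	int main(void)
	{
		struct cmd_queue q = { NULL, NULL, 0, 0 };
		struct cmd a = { 1, NULL }, b = { 2, NULL }, r = { 3, NULL };

		cmdq_add(&q, &a);	/* queue: a */
		cmdq_add(&q, &b);	/* queue: a b */
		cmdq_insert(&q, &r);	/* queue: r a b -- r jumps the line */
		assert(cmdq_pop(&q)->tag == 3);
		assert(cmdq_pop(&q)->tag == 1);
		assert(cmdq_pop(&q)->tag == 2);
		assert(cmdq_pop(&q) == NULL && q.qmax == 3);
		return 0;
	}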

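ub_cmd_build_block shifts the block layer's 512-byte sector counts right by capacity.bshift to get device blocks, then encodes a ten-byte CDB: the opcode in byte 0, a big-endian 32-bit LBA in bytes 2-5, and a big-endian 16-bit block count in bytes 7-8 (hence the "2048GB" comment: 32 bits of LBA at 512 bytes per block). Below is a standalone sketch of that encoding. build_rw10_cdb is a hypothetical helper, not a driver function; the 0x28/0x2A opcodes are the standard SCSI READ(10)/WRITE(10) values, and bshift = 2 models a 2048-byte-block device such as CD media.

	/* Standalone sketch of the ten-byte CDB encoding used by
	 * ub_cmd_build_block. 'bshift' converts 512-byte sectors to
	 * device blocks (0 for 512-byte blocks, 2 for 2048-byte blocks). */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void build_rw10_cdb(uint8_t cdb[10], int is_read,
	    uint32_t sector512, uint32_t nsect512, unsigned bshift)
	{
		uint32_t block = sector512 >> bshift;	/* LBA in device blocks */
		uint32_t nblks = nsect512 >> bshift;	/* transfer length in blocks */

		memset(cdb, 0, 10);
		cdb[0] = is_read ? 0x28 : 0x2A;		/* READ(10) : WRITE(10) */
		cdb[2] = block >> 24;			/* LBA, big-endian */
		cdb[3] = block >> 16;
		cdb[4] = block >> 8;
		cdb[5] = block;
		cdb[7] = nblks >> 8;			/* length, big-endian */
		cdb[8] = nblks;
	}

	int main(void)
	{
		uint8_t cdb[10];
		int i;

		/* Read 8 sectors (4 KB) starting at 512-byte sector 2048 on a
		 * 2048-byte-block device: LBA 512, 2 blocks. */
		build_rw10_cdb(cdb, 1, 2048, 8, 2);
		for (i = 0; i < 10; i++)
			printf("%02x ", cdb[i]);
		printf("\n");	/* prints: 28 00 00 00 02 00 00 00 02 00 */
		return 0;
	}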