
vtime.c

Kernel / C
/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>

#define VTIMER_MAGIC (TIMER_MAGIC + 1)

static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_user_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer, clock;
	int rcu_user_flag;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
		      "  STCK %1"      /* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
		        "=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

	cputime = S390_lowcore.user_timer >> 12;
	rcu_user_flag = cputime != 0;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, HARDIRQ_OFFSET, cputime);

	cputime = S390_lowcore.steal_clock;
	if ((__s64) cputime > 0) {
		cputime >>= 12;
		S390_lowcore.steal_clock -= cputime << 12;
		account_steal_time(tsk, cputime);
	}

	run_local_timers();
	if (rcu_pending(smp_processor_id()))
		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
	scheduler_tick();
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_system_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"    /* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}
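/*
 * Editorial note, not part of the original file: the CPU timer and the TOD
 * clock share the same format, in which bit 51 corresponds to roughly one
 * microsecond, so the low 12 bits carry sub-microsecond resolution.  Under
 * that assumption, "value >> 12" in the accounting functions above converts
 * raw timer units into (approximately) microseconds for cputime_t, and
 * "cputime << 12" converts a microsecond count back into timer units; for
 * example, a raw delta of 0x1000 (4096) units accounts as one microsecond.
 */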
static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
		      "  SPT %1"     /* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
	S390_lowcore.last_update_timer = expires;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#endif

static void start_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
	set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
	__u64 done;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	/* nothing to do */
	if (list_empty(&vt_list->list)) {
		vt_list->idle = VTIMER_MAX_SLICE;
		goto fire;
	}

	/* store progress */
	asm volatile ("STPT %0" : "=m" (done));

	/*
	 * If done is negative we do not stop the CPU timer
	 * because we will instantly get an interrupt that
	 * will start the CPU timer again.
	 */
	if (done & 1LL<<63)
		return;
	else
		vt_list->offset += vt_list->to_expire - done;

	/* save the actual expire value */
	vt_list->idle = done;

	/*
	 * We cannot halt the CPU timer, we just write a value that
	 * nearly never expires (only after 71 years) and re-write
	 * the stored expire value if we continue the timer
	 */
 fire:
	set_vtimer(VTIMER_MAX_SLICE);
}
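/*
 * Editorial note, not part of the original file: the "71 years" figure in
 * the comment above is consistent with the unit convention, assuming
 * VTIMER_MAX_SLICE (defined in <asm/timer.h>, which is not shown here) is
 * on the order of 2^63 timer units:
 *
 *   2^63 units >> 12 = 2^51 microseconds ~= 2.25 * 10^9 seconds ~= 71 years
 */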
/*
 * Sorted add to a list. The list is searched linearly until the first
 * bigger element is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	void (*fn)(unsigned long, struct pt_regs*);
	unsigned long data;

	if (list_empty(cb_list))
		return;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		fn = event->function;
		data = event->data;
		fn(data, regs);

		if (!event->interval)
			/* delete one shot timer */
			list_del_init(&event->entry);
		else {
			/* move interval timer back to list */
			spin_lock(&vt_list->lock);
			list_del_init(&event->entry);
			list_add_sorted(event, &vt_list->list);
			spin_unlock(&vt_list->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
	int cpu;
	__u64 next, delta;
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	struct list_head *ptr;
	/* the callback queue */
	struct list_head cb_list;

	INIT_LIST_HEAD(&cb_list);
	cpu = smp_processor_id();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* walk timer list, fire all expired events */
	spin_lock(&vt_list->lock);

	if (vt_list->to_expire < VTIMER_MAX_SLICE)
		vt_list->offset += vt_list->to_expire;

	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
		if (event->expires > vt_list->offset)
			/* found first unexpired event, leave */
			break;

		/* re-charge interval timer, we have to add the offset */
		if (event->interval)
			event->expires = event->interval + vt_list->offset;

		/* move expired timer to the callback queue */
		list_move_tail(&event->entry, &cb_list);
	}
	spin_unlock(&vt_list->lock);
	do_callbacks(&cb_list, regs);

	/* next event is first in list */
	spin_lock(&vt_list->lock);
	if (!list_empty(&vt_list->list)) {
		ptr = vt_list->list.next;
		event = list_entry(ptr, struct vtimer_list, entry);
		next = event->expires - vt_list->offset;

		/* add the expired time from this interrupt handler
		 * and the callback functions
		 */
		asm volatile ("STPT %0" : "=m" (delta));
		delta = 0xffffffffffffffffLL - delta + 1;
		vt_list->offset += delta;
		next -= delta;
	} else {
		vt_list->offset = 0;
		next = VTIMER_MAX_SLICE;
	}
	spin_unlock(&vt_list->lock);
	set_vtimer(next);
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->magic = VTIMER_MAGIC;
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
	spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int check_vtimer(struct vtimer_list *timer)
{
	if (timer->magic != VTIMER_MAGIC)
		return -EINVAL;
	return 0;
}

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	unsigned long flags;
	__u64 done;
	struct vtimer_list *event;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	if (timer->cpu != smp_processor_id())
		printk("internal_add_vtimer: BUG, running on wrong CPU");

	/* if list is empty we only have to set the timer */
	if (list_empty(&vt_list->list)) {
		/* reset the offset, this may happen if the last timer was
		 * just deleted by mod_virt_timer and the interrupt
		 * didn't happen until here
		 */
		vt_list->offset = 0;
		goto fire;
	}

	/* save progress */
	asm volatile ("STPT %0" : "=m" (done));

	/* calculate completed work */
	done = vt_list->to_expire - done + vt_list->offset;
	vt_list->offset = 0;

	list_for_each_entry(event, &vt_list->list, entry)
		event->expires -= done;

 fire:
	list_add_sorted(timer, &vt_list->list);

	/* get first element, which is the next vtimer slice */
	event = list_entry(vt_list->list.next, struct vtimer_list, entry);

	set_vtimer(event->expires);
	spin_unlock_irqrestore(&vt_list->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline int prepare_vtimer(struct vtimer_list *timer)
{
	if (check_vtimer(timer) || !timer->function) {
		printk("add_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
		printk("add_virt_timer: invalid timer expire value!\n");
		return -EINVAL;
	}

	if (vtimer_pending(timer)) {
		printk("add_virt_timer: timer pending\n");
		return -EBUSY;
	}

	timer->cpu = get_cpu();
	return 0;
}
/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	if (prepare_vtimer(timer) < 0)
		return;
	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	if (prepare_vtimer(timer) < 0)
		return;
	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is running, e.g. by smp_call_function_on()
 *
 * The original mod_timer adds the timer if it is not pending. For
 * compatibility we do the same. The timer will be added on the current
 * CPU as a one-shot timer.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	struct vtimer_queue *vt_list;
	unsigned long flags;
	int cpu;

	if (check_vtimer(timer) || !timer->function) {
		printk("mod_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!expires || expires > VTIMER_MAX_SLICE) {
		printk("mod_virt_timer: invalid expire range\n");
		return -EINVAL;
	}

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vt_list->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vt_list->lock, flags);
		/* we do not activate an interval timer with mod_virt_timer */
		timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	if (timer->cpu != cpu) {
		printk("mod_virt_timer: running on wrong CPU, check your code\n");
		spin_unlock_irqrestore(&vt_list->lock, flags);
		put_cpu();
		return -EINVAL;
	}

	list_del_init(&timer->entry);
	timer->expires = expires;

	/* also change the interval if we have an interval timer */
	if (timer->interval)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vt_list->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vt_list;

	if (check_vtimer(timer)) {
		printk("del_virt_timer: timer not initialized\n");
		return -EINVAL;
	}

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	/* last timer removed */
	if (list_empty(&vt_list->list)) {
		vt_list->to_expire = 0;
		vt_list->offset = 0;
	}

	spin_unlock_irqrestore(&vt_list->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);
/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vt_list;
	unsigned long cr0;

	/* kick the virtual timer */
	S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
	S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
	asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
	__ctl_store(cr0, 0, 0);
	cr0 |= 0x400;
	__ctl_load(cr0, 0, 0);

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
	INIT_LIST_HEAD(&vt_list->list);
	spin_lock_init(&vt_list->lock);
	vt_list->to_expire = 0;
	vt_list->offset = 0;
	vt_list->idle = 0;
}

static int vtimer_idle_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_IDLE:
		stop_cpu_timer();
		break;
	case CPU_NOT_IDLE:
		start_cpu_timer();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
	.notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
					      &ext_int_info_timer) != 0)
		panic("Couldn't request external interrupt 0x1005");

	if (register_idle_notifier(&vtimer_idle_nb))
		panic("Couldn't register idle notifier");

	init_cpu_vtimer();
}
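The file above exports a small timer API (init_virt_timer, add_virt_timer, add_virt_timer_periodic, mod_virt_timer, del_virt_timer). The sketch below is an illustrative, unofficial example of how a caller might arm a periodic virtual CPU timer against that API; it is not part of the kernel tree. The struct vtimer_list definition and VTIMER_MAX_SLICE come from <asm/timer.h> (not shown here), the identifiers prefixed with sample_ are hypothetical, and the expire value assumes the microseconds-shifted-by-12 unit convention noted earlier.

/* usage sketch - illustrative only, not part of vtime.c */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/timer.h>

static struct vtimer_list sample_vtimer;	/* hypothetical example timer */

/* callback signature as invoked by do_callbacks() above */
static void sample_vtimer_fn(unsigned long data, struct pt_regs *regs)
{
	printk("virtual cpu timer fired, data=%lu\n", data);
}

static void sample_vtimer_start(void)
{
	init_virt_timer(&sample_vtimer);
	sample_vtimer.function = sample_vtimer_fn;
	sample_vtimer.data = 0;
	/* roughly one second of virtual cpu time, assuming the
	 * microseconds << 12 unit convention */
	sample_vtimer.expires = 1000000ULL << 12;
	add_virt_timer_periodic(&sample_vtimer);	/* re-armed each interval */
}

static void sample_vtimer_stop(void)
{
	del_virt_timer(&sample_vtimer);
}

Using add_virt_timer() instead of add_virt_timer_periodic() would arm the same timer as a one-shot, since it clears timer->interval before queueing it.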
