posix-cpu-timers.c
linux 2.6.19 kernel source code before patching
Language: C
Page 1 of 3
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sched_time < t->expires.sched) {
			sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads.  */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads.  */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) ||
	    !cputime_eq(virt_expires, cputime_zero) ||
	    sched_expires != 0) {
		/*
		 * Rebalance the threads' expiry times for the remaining
		 * process CPU timers.
		 */
		cputime_t prof_left, virt_left, ticks;
		unsigned long long sched_left, sched;
		const unsigned int nthreads = atomic_read(&sig->live);

		if (!nthreads)
			return;

		prof_left = cputime_sub(prof_expires, utime);
		prof_left = cputime_sub(prof_left, stime);
		prof_left = cputime_div_non_zero(prof_left, nthreads);
		virt_left = cputime_sub(virt_expires, utime);
		virt_left = cputime_div_non_zero(virt_left, nthreads);
		if (sched_expires) {
			sched_left = sched_expires - sched_time;
			do_div(sched_left, nthreads);
			sched_left = max_t(unsigned long long, sched_left, 1);
		} else {
			sched_left = 0;
		}

		t = tsk;
		do {
			if (unlikely(t->flags & PF_EXITING))
				continue;

			ticks = cputime_add(cputime_add(t->utime, t->stime),
					    prof_left);
			if (!cputime_eq(prof_expires, cputime_zero) &&
			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
			     cputime_gt(t->it_prof_expires, ticks))) {
				t->it_prof_expires = ticks;
			}

			ticks = cputime_add(t->utime, virt_left);
			if (!cputime_eq(virt_expires, cputime_zero) &&
			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
			     cputime_gt(t->it_virt_expires, ticks))) {
				t->it_virt_expires = ticks;
			}

			sched = t->sched_time + sched_left;
			if (sched_expires && (t->it_sched_expires == 0 ||
					      t->it_sched_expires > sched)) {
				t->it_sched_expires = sched;
			}
		} while ((t = next_thread(t)) != tsk);
	}
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_clock_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
		(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
		 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
	    (tsk->it_sched_expires == 0 ||
	     tsk->sched_time < tsk->it_sched_expires))
		return;

#undef	UNEXPIRED

	/*
	 * Double-check with locks held.
	 */
	read_lock(&tasklist_lock);
	if (likely(tsk->signal != NULL)) {
		spin_lock(&tsk->sighand->siglock);

		/*
		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
		 * all the timers that are firing, and put them on the firing list.
		 */
		check_thread_timers(tsk, &firing);
		check_process_timers(tsk, &firing);

		/*
		 * We must release these locks before taking any timer's lock.
		 * There is a potential race with timer deletion here, as the
		 * siglock now protects our private firing list.  We have set
		 * the firing flag in each timer, so that a deletion attempt
		 * that gets the timer lock before we do will give it up and
		 * spin until we've taken care of that timer below.
		 */
		spin_unlock(&tsk->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * noone will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int firing;
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(firing >= 0)) {
			cpu_timer_fire(timer);
		}
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_clock_sample_group_locked(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire. */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one.  If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_first_entry(head,
				  struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		/*
		 * Rejigger each thread's expiry time so that one will
		 * notice before we hit the process-cumulative expiry time.
		 */
		union cpu_time_count expires = { .sched = 0 };
		expires.cpu = *newval;
		process_timer_rebalance(tsk, clock_idx, expires, now);
	}
}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;

	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, it);
		spin_unlock_irq(&timer.it_lock);

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}

int posix_cpu_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
	    &current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;
	}
	return error;
}

long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec __user *rmtp;
	struct timespec t;
	struct itimerspec it;
	int error;

	rmtp = (struct timespec __user *) restart_block->arg1;
	t.tv_sec = restart_block->arg2;
	t.tv_nsec = restart_block->arg3;
	restart_block->fn = do_no_restart_syscall;
	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = t.tv_sec;
		restart_block->arg3 = t.tv_nsec;
	}
	return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp, struct timespec __user *rmtp)
{
	return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
		.nsleep_restart = process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
		.nsleep_restart = thread_cpu_nsleep_restart,
	};
	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
	return 0;
}
__initcall(init_posix_cpu_timers);
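The listing ends with init_posix_cpu_timers() registering CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID. As a rough user-space sketch only (it is not part of the kernel file above; the handler names, the 100 ms profiling interval, and the 2 s / 4 s CPU limits are made up for illustration), the program below exercises the paths shown here: setitimer(ITIMER_PROF) and setrlimit(RLIMIT_CPU) feed the it_prof_expires and RLIMIT_CPU checks in check_process_timers(), and clock_gettime() on the two CPU-time clocks goes through the k_clock entries registered above.

/* cputimers.c -- illustrative sketch, not kernel code. */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <time.h>

static volatile sig_atomic_t prof_hits;	/* SIGPROF count (sent by the kernel paths above) */
static volatile sig_atomic_t got_xcpu;	/* set when the RLIMIT_CPU soft limit is hit */

static void on_sigprof(int sig) { (void)sig; prof_hits++; }
static void on_sigxcpu(int sig) { (void)sig; got_xcpu = 1; }

int main(void)
{
	struct sigaction sa;
	struct itimerval itv;
	struct rlimit rl;
	struct timespec proc_ts, thread_ts;

	/* Handle the signals that check_process_timers() sends. */
	memset(&sa, 0, sizeof sa);
	sa.sa_handler = on_sigprof;
	sigaction(SIGPROF, &sa, NULL);
	sa.sa_handler = on_sigxcpu;
	sigaction(SIGXCPU, &sa, NULL);

	/* ITIMER_PROF: fire every 100 ms of consumed process CPU time
	 * (the it_prof_expires / it_prof_incr path in the listing). */
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 100000;
	itv.it_interval = itv.it_value;
	setitimer(ITIMER_PROF, &itv, NULL);

	/* RLIMIT_CPU: soft limit 2 s -> SIGXCPU, hard limit 4 s -> SIGKILL
	 * (the rlim[RLIMIT_CPU] checks in check_process_timers). */
	rl.rlim_cur = 2;
	rl.rlim_max = 4;
	setrlimit(RLIMIT_CPU, &rl);

	/* Burn CPU until the soft limit delivers SIGXCPU. */
	while (!got_xcpu)
		;

	/* Read the CPU-time clocks registered by init_posix_cpu_timers(). */
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &proc_ts);
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &thread_ts);

	printf("SIGPROF ticks: %d\n", (int)prof_hits);
	printf("process CPU time: %ld.%09ld s\n",
	       (long)proc_ts.tv_sec, proc_ts.tv_nsec);
	printf("thread  CPU time: %ld.%09ld s\n",
	       (long)thread_ts.tv_sec, thread_ts.tv_nsec);
	return 0;
}

Build with cc -o cputimers cputimers.c (older glibc may also need -lrt for clock_gettime). The SIGPROF count grows with consumed CPU time, and the loop ends when roughly two seconds of process CPU time trigger the soft-limit SIGXCPU shown in the listing.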
