rtsched.h

Linux 2.4.20 kernel modifications for Samsung's s3c2410 development board.
Page 1 of 3
/*
 *  linux/kernel/rtsched.h
 *
 *  NOTE: This is a .h file that is mostly source, not the usual convention.
 *        It is coded this way to allow the depend rules to correctly set
 *        up the make file dependencies.  This is an alternate scheduler
 *        that replaces the core scheduler in sched.c.  It does not, however,
 *        replace most of the static support functions that call schedule.
 *        By making this an include file for sched.c, all of those functions
 *        are retained without the need for duplicate code and its attendant
 *        support issues.  At the same time, keeping it a separate file allows
 *        diff and patch to work most cleanly and correctly.
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001 MontaVista Software Inc.
 *
 *  1998-12-28  Implemented better SMP scheduling by Ingo Molnar
 *  2000-03-15  Added the Real Time run queue support by George Anzinger
 *  2000-08-29  Added code to do lazy recalculation of counters
 *              by George Anzinger
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * current-task
 */

#ifndef preempt_disable
#define preempt_disable()
#define preempt_enable()
#define preempt_get_count() 0
#define preempt_enable_no_resched()
#endif

/*
 * scheduler variables
 */

#define VERSION_DATE "<20011203.1609.50>"

/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
	struct schedule_data {
		struct task_struct * curr;
		cycles_t last_schedule;
		struct list_head schedule_data_list;
		int cpu, effprio;
	} schedule_data;
	char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task, 0, {0, 0}, 0, 0}}};

#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr

static void newprio_ready_q(struct task_struct * tptr, int newprio);
#ifdef CONFIG_SMP
static void newprio_executing(struct task_struct *tptr, int newprio);
static struct list_head hed_cpu_prio __cacheline_aligned =
						LIST_HEAD_INIT(hed_cpu_prio);
#endif

/*
 * task_on_rq tests for task actually in the ready queue.
 * task_on_runque tests for task either on ready queue or being executed
 * (by virtue of our setting a running task's run_list.next to 1)
 */
#define task_on_rq(p) ((unsigned)p->run_list.next > 1)

static struct list_head rq[MAX_PRI+1] ____cacheline_aligned;

static struct ready_queue {
	int recalc;	/* # of counter recalculations on SCHED_OTHER */
	int ticks;	/* # of ticks for all in SCHED_OTHER ready Q */
} runq ____cacheline_aligned;

/*
 * Set the bit map up with guard bits below.  This will result in
 * priority -1 if there are no tasks in the ready queue, which will
 * happen as we are not putting the idle tasks in the ready queue.
 */
static struct {
	int guard;
	int rq_bit_ary[(MAX_PRI/32) + 1];
} rq_bits = {-1, {0, 0, 0, 0}};

#define rq_bit_map rq_bits.rq_bit_ary

static int high_prio = 0;

#define Rdy_Q_Hed(pri) &rq[pri]
#define PREEMPTION_THRESHOLD 1
#define NOT_RT 0	/* Use priority zero for non-RT processes */
#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule

struct kernel_stat kstat;

#ifdef CONFIG_SMP
/*
 * At the moment, we will ignore cpus_allowed, primarily because if it were
 * used, we would have a conflict in the runq.ticks count (i.e. since we
 * are not scheduling some tasks, the count would not reflect what is
 * really on the list).  Oh, and also, nowhere is there code in the
 * kernel to set cpus_allowed to anything but -1.  In the long run, we
 * would like to try separate lists for each cpu, at which point
 * cpus_allowed could be used to direct the task to the proper list.
 * Well, darn, now there is code that messes with cpus_allowed.  We will
 * change sometime soon....
 */
#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p,cpu) \
	((p)->cpus_runnable & (p)->cpus_allowed & (1 << cpu))
#else
#define idle_task(cpu) (&init_task)
#define can_schedule(p,cpu) (1)
#endif

void scheduling_functions_start_here(void) { }

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *	 -1000: never select this
 *	     0: out of time, recalculate counters (but it might still be
 *		selected)
 *	   +ve: "goodness" value (the larger, the better)
 */
static inline int goodness(struct task_struct * p, int this_cpu,
			   struct mm_struct *this_mm)
{
	int weight;

	/*
	 * goodness is NEVER called for Realtime processes!
	 * Realtime process, select the first one on the
	 * runqueue (taking priorities within processes
	 * into account).
	 */

	/*
	 * Give the process a first-approximation goodness value
	 * according to the number of clock-ticks it has left.
	 *
	 * Don't do any other calculations if the time slice is
	 * over or if this is an idle task.
	 */
	weight = p->counter;
	if (weight <= 0)
		goto out;

#ifdef CONFIG_SMP
	/* Give a largish advantage to the same processor...   */
	/* (this is equivalent to penalizing other processors) */
	if (p->processor == this_cpu)
		weight += PROC_CHANGE_PENALTY;
#endif

	/* .. and a slight advantage to the current MM */
	if (p->mm == this_mm || !p->mm)
		weight += 1;
	weight += 20 - p->nice;
out:
	return weight;
}

/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'dont'.
 */
static inline int preemption_goodness(struct task_struct * prev,
				      struct task_struct * p, int cpu)
{
	return goodness(p, cpu, prev->active_mm) -
	       goodness(prev, cpu, prev->active_mm);
}

/*
 * This is ugly, but reschedule_idle() is very timing-critical.
 * We are called with the runqueue spinlock held and we must
 * not claim the tasklist_lock.
 */
static FASTCALL(void reschedule_idle(struct task_struct * p));
static void reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id(), target_cpu;
	struct task_struct *target_tsk;
	struct list_head *cptr;
	struct schedule_data *sch;
	int best_cpu;

	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->processor;
	target_tsk = idle_task(best_cpu);
	if (cpu_curr(best_cpu) == target_tsk)
		goto preempt_now;

	/*
	 * For real time, the choice is simple.  We just check
	 * if the most available processor is working on a lower
	 * priority task.  If so we bounce it; if not, there is
	 * nothing more important than what we are doing.
	 * Note that this will pick up any idle cpu(s) we may
	 * have as they will have effprio of -1.
	 */
	cptr = hed_cpu_prio.prev;
	sch = list_entry(cptr,
			 struct schedule_data,
			 schedule_data_list);
	target_tsk = sch->curr;
	if (p->effprio > sch->effprio) {
		goto preempt_now;
	}
	/*
	 * If all cpus are doing real time and we failed
	 * above, then there is no help for this task.
	 */
	if (sch->effprio)
		goto out_no_target;
	/*
	 * Non-real time contender and one or more processors
	 * doing non-real time things.
	 * So we have a non-real time task contending among
	 * other non-real time tasks on one or more processors.
	 * We know we have no idle cpus.
	 */
	/*
	 * No CPU is idle, but maybe this process has enough priority
	 * to preempt its preferred CPU.
	 */
	target_tsk = cpu_curr(best_cpu);
	if (target_tsk->effprio == 0 &&
	    preemption_goodness(target_tsk, p, best_cpu) > 0)
		goto preempt_now;

	for (; cptr != &hed_cpu_prio; cptr = cptr->prev) {
		sch = list_entry(cptr,
				 struct schedule_data,
				 schedule_data_list);
		if (sch->effprio != 0)
			break;
		if (sch->cpu != best_cpu) {
			target_tsk = sch->curr;
			if (preemption_goodness(target_tsk, p, sch->cpu) >
			    PREEMPTION_THRESHOLD)
				goto preempt_now;
		}
	}

out_no_target:
	return;

preempt_now:
	target_cpu = target_tsk->processor;
	target_tsk->need_resched = 1;
	/*
	 * the APIC stuff can go outside of the lock because
	 * it uses no task information, only CPU#.
	 */
	if ((target_cpu != this_cpu)
	    && (target_tsk != idle_task(target_cpu)))
		smp_send_reschedule(target_cpu);
	return;
#else /* UP */
	struct task_struct *tsk;

	tsk = cpu_curr(0);
	if ((high_prio > tsk->effprio) ||
	    (!tsk->effprio && preemption_goodness(tsk, p, 0) >
	     PREEMPTION_THRESHOLD)) {
		tsk->need_resched = 1;
	}
#endif
}

/*
 * This routine maintains the list of smp processors.  This is
 * a bidirectional list maintained in priority order.  The above
 * code uses this list to find a processor to use for a new task.
 * The search will be backward thru the list as we want to take
 * the lowest priority cpu first.  We put equal priorities such that
 * the new one will be ahead of the old, so the new should stay
 * around a bit longer.
 */
#ifdef CONFIG_SMP
static inline void re_queue_cpu(struct task_struct *next,
				struct schedule_data *sch)
{
	struct list_head *cpuptr;

	list_del(&sch->schedule_data_list);
	sch->effprio = next->effprio;
	cpuptr = hed_cpu_prio.next;
	while (cpuptr != &hed_cpu_prio &&
	       sch->effprio < list_entry(cpuptr,
					 struct schedule_data,
					 schedule_data_list)->effprio)
		cpuptr = cpuptr->next;
	list_add_tail(&sch->schedule_data_list, cpuptr);
	next->newprio = &newprio_executing;
}
#else
#define re_queue_cpu(a,b)
#endif

/*
 * Careful!
 *
 * This has to add the process to the _beginning_ of the
 * run-queue, not the end. See the comment about "This is
 * subtle" in the scheduler proper..
 *
 * For real time tasks we do this a bit differently.  We
 * keep a priority list of ready tasks.  We remove tasks
 * from this list when they are running, so a running real
 * time task will not be in either the ready list or the run
 * queue.  Also, in the name of speed and real time, only
 * priority is important, so we spend a few bytes on the queue.
 * We have a doubly linked list for each priority.  This makes
 * insert and removal very fast.  We also keep a bit map of
 * the priority queues where a bit says if the queue is empty
 * or not.  We also keep loose track of the highest priority
 * queue that is currently occupied.  This high_prio mark
 * is updated when a higher priority task enters the ready
 * queue and only goes down when we look for a task in the
 * ready queue at high_prio and find none.  Then, and only
 * then, we examine the bit map to find the true high_prio.
 */
#define BF 31	/* bit flip constant */
#define set_rq_bit(bit)   set_bit(BF-((bit)&0x1f), &rq_bit_map[(bit) >> 5])
#define clear_rq_bit(bit) clear_bit(BF-((bit)&0x1f), &rq_bit_map[(bit) >> 5])

static inline void _del_from_runqueue(struct task_struct * p)
{
	nr_running--;
	list_del(&p->run_list);
	if (list_empty(Rdy_Q_Hed(p->effprio))) {
		clear_rq_bit(p->effprio);
	}
	p->run_list.next = NULL;	/* != 0 prevents requeue */
	p->newprio = NULL;
	if (!p->effprio)
		runq.ticks -= p->counter;
}

/* Exported for main.c, also used in init code here */
void __del_from_runqueue(struct task_struct * p)
{
	_del_from_runqueue(p);
}

static inline struct task_struct * get_next_task(struct task_struct * prev,
						 int this_cpu)
{
	struct list_head *next, *rqptr;
	struct task_struct *it = 0;
	int *i, c, oldcounter;

repeat_schedule:
	rqptr = Rdy_Q_Hed(high_prio);
	next = rqptr->next;
	if (unlikely(next == rqptr)) {
		for (i = &rq_bit_map[MAX_PRI/32],
		     high_prio = BF + ((MAX_PRI/32) * 32);
		     (*i == 0); high_prio -= 32, i--)
			;
		high_prio -= ffz(~*i);
		if (unlikely(high_prio < 0)) {
			/*
			 * No tasks to run, return this cpu's idle task.
			 * It is not in the ready queue, so no need to
			 * remove it.  But first make sure its priority
			 * keeps it out of the way.
			 */
			high_prio = 0;
			it = idle_task(this_cpu);
			it->effprio = -1;
			return it;
		}
		goto repeat_schedule;
	}
	/*
	 * If there is only one task on the list, it is a no brainer.
	 * But really, this also prevents us from looping on recalculation
	 * if the one and only task is trying to yield.  These sorts of
	 * loops are NOT_FUN.  Note: we use likely() to tilt toward
	 * real-time tasks, even though they are, usually, unlikely.  We
	 * are, after all, a real time scheduler.
	 */
	if (likely(high_prio || next->next == rqptr)) {
		it = list_entry(next, struct task_struct, run_list);
back_from_figure_non_rt_next:
		_del_from_runqueue(it);
		return it;
	}
	/*
	 * Here we set up a SCHED_OTHER yield.  Note that for other policies
	 * yield is handled elsewhere.  This means we can use == and =
	 * instead of & and &= to test and clear the flag.  If the prev
	 * task has all the runq.ticks, then we just do the recalculation
	 * version and let the winner take all (yield fails).  Otherwise
	 * we force the counter to zero for the loop and put it back
	 * after we find some other task.  We must remember to update
