/* rtl_sched.c */
/*
 * RTLinux default scheduler
 * RTLinux has a modular scheduler and this may be replaced if desired.
 *
 * Written by Michael Barabanov, Victor Yodaiken
 * Copyright (C) Finite State Machine Labs Inc., 1998,1999
 * Released under the terms of the GNU General Public License Version 2
 */

/* TODO: the problem with task queues is that they optimize for large
 * numbers of tasks.  Large numbers of tasks would require a total redesign
 * of the scheduler; this scheduler should optimize for < 40 threads, and
 * really for < 10.
 */

/* ChangeLog
 * Dec 18 2002 Der Herr Hofrat <der.herr@hofr.at>
 *   moved setting of resume time into pthread_wait_np
 *   scheduler reordered to run in a single loop through the task-list
 *
 * Dec 22 2002 Josep Vidal <jvidal@disca.upv.es> (OCERA)
 *   Added POSIX signals support (for user signals)
 *   Added POSIX timers support
 *   Fixed stack overflow when sending RTL_SIGNAL_SUSPEND signal.
 *   Fixed mutex bug when setting t->do_abort=0 on do_signal.
 *   Fixed long long casting in usleep pointed by P. Mendoza.
 *   See examples/bug directory for more info.
 *   Thanks to I.Ripoll, P. Mendoza & P. Perez.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/config.h>
#include <asm/system.h>
#include <asm/segment.h>
#include <linux/timex.h>
#include <rtl_conf.h>
#include <rtl_debug.h>
#include <rtl_core.h>
#include <rtl.h>
#include <rtl_sync.h>
#include <rtl_sched.h>
#include <rtl_tqueue.h>
#include <arch/rtl_switch.h>
#ifdef CONFIG_OC_PTIMERS
#include <time.h>
#endif

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("FSMLabs Inc.");
MODULE_DESCRIPTION("RTLinux default priority scheduler");

/* Protects the zombie_threads list below (see delete_thread()). */
static spinlock_t rtl_tqueue_lock;
/* Soft interrupt pended after a thread is queued on zombie_threads so that
 * its memory can be released outside RT context (handler not visible in
 * this chunk). */
static int rtl_sched_irq;
/* Singly linked list (chained through ->next) of exited threads awaiting
 * final memory release. */
static pthread_t zombie_threads;

/* Per-CPU scheduler state: one slot per CPU on SMP, a single slot
 * otherwise. */
#ifdef CONFIG_SMP
struct rtl_sched_cpu_struct rtl_sched [NR_CPUS];
#else
struct rtl_sched_cpu_struct rtl_sched [1];
#endif

/* # define rtl_printf(fac, args...)
do ; while (0) */

/*
 * Common entry trampoline for newly created RT threads: re-enables
 * interrupts, arms lazy FPU save/restore if the thread uses the FPU,
 * performs POSIX per-thread init, then runs the user function and
 * turns its return value into pthread_exit().
 *
 * fn:   the thread's start routine.
 * data: argument passed through to fn.
 * Returns: never actually returns; the trailing return 0 is unreachable.
 */
int rtl_startup(void *(*fn)(void *), void *data)
{
	void *retval;
	rtl_allow_interrupts();
#ifdef CONFIG_RTL_FP_SUPPORT
	if (pthread_self()->uses_fp) {
		pthread_self()->uses_fp = 0; /* to force save/restore */
		pthread_setfp_np (pthread_self(), 1);
	}
#endif
	/*
	 * This is needed by the V1 API. Since we don't do the V1
	 * API on anything but x86 we don't need to do the schedule,
	 * though.
	 * -- Cort
	 */
	rtl_posix_init(pthread_self());
	rtl_schedule();
	retval = (*fn)(data);
	pthread_exit(retval);
	/* will never reach this line */
	return 0;
}

/* Return the pseudo-thread that represents Linux itself on the given CPU. */
static inline pthread_t rtl_get_linux_thread(int cpu_id)
{
	return &(rtl_sched[cpu_id].rtl_linux_task);
}

/*
 * Switch a scheduler clock between periodic and one-shot operation.
 * In one-shot mode the period argument is ignored (forced to
 * HRTIME_INFINITY before being handed to settimer).  Runs with
 * interrupts disabled around both clock driver calls.
 * Returns 0 on success or the clock driver's error code.
 */
int rtl_setclockmode (clockid_t clock, int mode, hrtime_t param)
{
	int ret;
	rtl_irqstate_t flags;
	rtl_no_interrupts (flags);
	ret = clock->settimermode (clock, mode);
	if (ret != 0) {
		rtl_restore_interrupts (flags);
		return ret;
	}
	if (mode == RTL_CLOCK_MODE_ONESHOT) {
		param = HRTIME_INFINITY;
	}
	ret = clock->settimer (clock, param);
	rtl_restore_interrupts (flags);
	return ret;
}

/*
 * Return the Linux task_struct that is current on this CPU.
 * On i386, when an RT thread (not the Linux pseudo-thread) is running,
 * `current` cannot be trusted, so the task_struct is recovered from the
 * saved Linux stack pointer by masking down to the 8 KB task union
 * (hence the ~8191 mask).
 */
struct task_struct *get_linux_current(void)
{
#if !defined(__i386__)
	return current;
#else
	if ( RTL_CURRENT == rtl_get_linux_thread(rtl_getcpuid()) ) {
		return current;
	} else {
		int *sp = rtl_get_linux_thread(rtl_getcpuid())->stack;
		return (struct task_struct *)((ulong)sp & (ulong)~8191UL);
	}
#endif
}

/*
 * Trigger a scheduling pass for thread t: a cross-CPU reschedule IPI if t
 * lives on another CPU (SMP), otherwise a direct local rtl_schedule().
 */
static void rtl_reschedule_thread(pthread_t t)
{
#ifdef CONFIG_SMP
	if (t->cpu != rtl_getcpuid())
		rtl_reschedule (t->cpu);
	else
#endif
		rtl_schedule();
}

/* Note: as per POSIX standard, delivery of the signal is not necessarily
   soon. In RTLinux it waits until the next scheduling you must do
   sigqueue(cpu_id,RTL_RESCHED_SIGNAL) to get it to work. but TODO sigqueue
   Note that currently no soft signals require any real processing so we
   don't need to be "in the context" of the receiving thread.
   Signals are hard: meaning we get them in the process context anyways (on
   top of whatever thread is running) or schedule control which means that
   we just manipulate the scheduler status and be done. */

/* Bail out of the enclosing function with ESRCH unless `thread` still
 * carries the live-thread magic value. */
#define CHECK_VALID(thread) do { if ((thread)->magic != RTL_THREAD_MAGIC) return ESRCH; } while (0)

/*
 * Deliver a signal to an RT thread, or pend a soft interrupt to Linux.
 *
 * For signal numbers <= RTL_MAX_SIGNAL the target must be a valid RT
 * thread; signal 0 is only a validity probe (per POSIX).  Delivery is just
 * setting the pending bit — the signal takes effect on the next scheduling
 * pass (see the note above), except that with CONFIG_OC_PSIGNALS a user
 * signal to a higher-priority thread forces an immediate rtl_schedule().
 *
 * Larger signal numbers are only meaningful when the target is this CPU's
 * Linux pseudo-thread: they select a global pended IRQ or (SMP) a local
 * pended vector.
 *
 * Returns 0 on success, ESRCH/EINVAL on a bad target or signal number.
 */
int pthread_kill(pthread_t thread, int signal)
{
	if ((unsigned) signal <= RTL_MAX_SIGNAL) {
		CHECK_VALID(thread);
		if (signal == 0) {
			return 0;
		}
		set_bit(signal, &thread->pending);
#ifdef CONFIG_OC_PSIGNALS
		if ((signal >= RTL_SIGUSR1) && HIGHER_PRIORITY_THREAD(thread,pthread_self()))
			rtl_schedule();
#endif
		return 0;
	} else if(thread != rtl_get_linux_thread(rtl_getcpuid())) {
		return EINVAL;
	} else {
		/* the signal number means something else */
		/* TODO one range for local one for global */
		if((signal < RTL_LINUX_MIN_SIGNAL) || (signal > RTL_LINUX_MAX_SIGNAL))
			return EINVAL;
		else if(signal < RTL_LINUX_MIN_LOCAL_SIGNAL){ /* it's global */
			rtl_global_pend_irq (signal - RTL_LINUX_MIN_SIGNAL );
		}
#ifdef CONFIG_SMP
		else{
			rtl_local_pend_vec(signal - RTL_LINUX_MIN_LOCAL_SIGNAL,rtl_getcpuid());
		}
#endif
		return 0;
	}
}

/*
 * Run and unlink every pthread_cleanup handler queued on thread t,
 * in LIFO order (the list head is re-read each iteration because a
 * routine may itself manipulate the cleanup list).
 */
static void do_cleanups(pthread_t t)
{
	struct rtl_cleanup_struct *ts = t->cleanup;
	while ((ts = t->cleanup)) {
		t->cleanup = ts->next;
		ts->routine(ts->arg);
	}
}

/*
 * Unlink `thread` from this CPU's task list.  Call with interrupts
 * disabled.  On SMP it refuses (ESRCH) to operate on a thread homed on
 * another CPU.  Also drops the thread's FPU ownership if it held it.
 * Returns 0 on success, ESRCH if the thread is not on the list.
 */
static int remove_from_this_cpu(pthread_t thread)
{
	int found = 0;
	struct rtl_thread_struct *t;
	schedule_t *s;
	s = LOCAL_SCHED;
#ifdef CONFIG_SMP
	{
		unsigned int cpu_id = rtl_getcpuid();
		if (thread->cpu != cpu_id){
			rtl_printf("RTLinux ERROR: remove_from this cpu crosses CPUs\n");
			return ESRCH;
		}
	}
#endif
	rtl_spin_lock (&s->rtl_tasks_lock);
	if (thread != s->rtl_tasks) {
		/* Not the list head: walk the singly linked list for the
		 * predecessor and splice the thread out. */
		for (t = s->rtl_tasks; t; t = t->next) {
			if (t->next == thread) {
				t->next = thread->next;
				found = 1;
				break;
			}
		}
		if (!found) {
			rtl_spin_unlock (&s->rtl_tasks_lock);
			return ESRCH;
		}
	} else {
		s->rtl_tasks = thread->next;
	}
	if (s->rtl_task_fpu_owner == thread) {
		s->rtl_task_fpu_owner = 0;
	}
	rtl_spin_unlock (&s->rtl_tasks_lock);
	return 0;
}

/*
 * Deferred free routine run via the task queue: releases the thread's
 * kmalloc'd stack (if the scheduler allocated one) and then the thread
 * structure itself.
 */
static void rtl_task_free_memory(void *p)
{
	struct rtl_thread_struct *task = (struct rtl_thread_struct *) p;
	if (task->kmalloc_stack_bottom) {
/* rtl_printf("freeing %#x %#x\n", task->kmalloc_stack_bottom); */
		kfree (task->kmalloc_stack_bottom);
	}
	kfree (task);
}

/*
 * Tear down a thread: invalidate its magic, pull it off this CPU's task
 * list, push it onto the zombie list, and pend rtl_sched_irq so Linux
 * context later frees the memory (via rtl_task_free_memory).
 *
 * NOTE(review): free_task lives on this stack frame and is stored through
 * thread->cleanup (cast to struct rtl_cleanup_struct *); this presumably
 * relies on the caller never returning before the zombie is reaped —
 * only ever called from pthread_exit(), which does not return.
 * Returns 0, or the error from remove_from_this_cpu().
 */
static int delete_thread(pthread_t thread)
{
	int ret;
	struct tq_struct free_task;
	rtl_irqstate_t interrupt_state;
	rtl_no_interrupts (interrupt_state);
	memset(&free_task, 0, sizeof(free_task));
	free_task.data = thread;
	free_task.routine = rtl_task_free_memory;
	thread->cleanup = (struct rtl_cleanup_struct *) &free_task;
	thread->magic = 0;
	ret = remove_from_this_cpu (thread);
	if (ret != 0) {
		rtl_restore_interrupts (interrupt_state);
		return ret;
	}
	spin_lock(&rtl_tqueue_lock);
	thread->next = zombie_threads;
	zombie_threads = thread;
	spin_unlock(&rtl_tqueue_lock);
	rtl_global_pend_irq (rtl_sched_irq);
	pthread_suspend_np(pthread_self());
	rtl_restore_interrupts (interrupt_state);
	return 0;
}

/*
 * POSIX pthread_exit for RT threads: blocks user signals (OC_PSIGNALS),
 * disables cancellation, runs cleanup handlers and POSIX teardown, marks
 * the thread finished, hands it to delete_thread(), and reschedules.
 * Never returns; the trailing rtl_printf only fires if that invariant
 * is ever broken.
 */
void pthread_exit(void *retval)
{
	rtl_irqstate_t flags;
	pthread_t self = pthread_self();
#ifdef CONFIG_OC_PSIGNALS
	/* Really necessary to block user signals?*/
	rtl_sigset_t mask;
	clear_bit(RTL_SIGNAL_HANDLER_EXECUTION_INPROGRESS,&self->pending);
	rtl_sigfillset(&mask);
	pthread_sigmask(SIG_SETMASK,&mask,NULL);
#endif
	rtl_no_interrupts(flags);
	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
	do_cleanups(self);
	rtl_posix_cleanup(retval);
	set_bit (RTL_THREAD_FINISHED, &self->threadflags);
	delete_thread(pthread_self());
	rtl_schedule();
	/* will never reach this line */
	rtl_restore_interrupts(flags);
	rtl_printf("pthread_exit() returned!\n");
}

/* POSIX sched_yield: just run a scheduling pass. */
int sched_yield(void)
{
	rtl_schedule();
	return 0;
}

/* Non-POSIX alias for sched_yield. */
int pthread_yield(void)
{
	rtl_schedule();
	return 0;
}

/*
 * Make thread p periodic: set its period, arm its first timeout at
 * start_time, and kick the (possibly remote) scheduler so the change
 * takes effect.  A negative period is rejected with EINVAL.
 */
int pthread_make_periodic_np (pthread_t p, hrtime_t start_time, hrtime_t period)
{
	rtl_irqstate_t interrupt_state;
	if (period < 0) {
		return EINVAL;
	}
	rtl_no_interrupts(interrupt_state);
	p->period = period;
	__rtl_setup_timeout(p, start_time);
	rtl_reschedule_thread(p);
	rtl_restore_interrupts(interrupt_state);
	return 0;
}

/*
 * itimerspec front-end to pthread_make_periodic_np.  A zero it_value
 * disarms the thread's timer (start pushed to HRTIME_INFINITY, period 0);
 * otherwise it_value/it_interval are converted to nanoseconds.  On SMP
 * the target thread must be homed on the calling CPU (ESRCH otherwise).
 */
int pthread_setperiod_np(pthread_t p, const struct itimerspec *value )
{
#ifdef CONFIG_SMP
	{
		unsigned int cpu_id = rtl_getcpuid();
		if (p->cpu != cpu_id){
			rtl_printf("RTLinux ERROR: pthread_setperiod crosses cpus\n");
			return ESRCH;
		}
	}
#endif
	if (!timespec_nz(&value->it_value)) {
		pthread_make_periodic_np (p, HRTIME_INFINITY, 0);
	} else {
		pthread_make_periodic_np (p, timespec_to_ns (&value->it_value), timespec_to_ns (&value->it_interval));
	}
	return 0;
}

/* Disarm t's timer and, if an abort hook is registered, invoke it. */
#define do_abort(t) do { clear_bit(RTL_THREAD_TIMERARMED, &t->threadflags); if (t->abort) { t->abort(t->abortdata);}} while (0)

/*
 * Default cancellation handler: record the pending cancel and, for
 * asynchronous cancel type, act on it immediately via pthread_testcancel.
 */
static void rtl_posix_cancel(int signal)
{
	set_bit (RTL_CANCELPENDING, &pthread_self()->threadflags);
	if (test_bit (RTL_CANCELTYPE, &pthread_self()->threadflags)) {
		pthread_testcancel();
	}
}

/* Indirection so the cancel handler could be replaced; defaults to
 * rtl_posix_cancel. */
static void (*rtl_sigcancel_handler)(int signal) = rtl_posix_cancel;

/*
 * Process the scheduler-control signals pending on thread t, with
 * interrupts disabled: SUSPEND marks it suspended, WAKEUP/TIMER mark it
 * ready, and an unblocked CANCEL additionally re-enables interrupts and
 * runs the cancel handler.  Each acted-on signal also fires the thread's
 * abort hook via do_abort().
 */
static inline void do_signal(pthread_t t)
{
	rtl_irqstate_t flags;
	rtl_no_interrupts(flags);
	if (test_and_clear_bit(RTL_SIGNAL_SUSPEND, &t->pending)) {
		/* t->abort = 0; t->abortdata = 0; */
		do_abort(t);
		RTL_MARK_SUSPENDED(t);
	}
	if (test_and_clear_bit(RTL_SIGNAL_WAKEUP, &t->pending)) {
		RTL_MARK_READY(t);
		do_abort(t);
	}
	if (test_and_clear_bit(RTL_SIGNAL_TIMER, &t->pending)) {
		RTL_MARK_READY(t);
		do_abort(t);
	}
	if (!test_bit(RTL_SIGNAL_CANCEL, &t->blocked) && test_and_clear_bit(RTL_SIGNAL_CANCEL, &t->pending)) {
		RTL_MARK_READY(t);
		do_abort(t);
		rtl_restore_interrupts(flags);
		(*rtl_sigcancel_handler)(RTL_SIGNAL_CANCEL);
	}
	rtl_restore_interrupts(flags);
}

#ifdef CONFIG_OC_PSIGNALS
/*
 * Dispatch pending, unblocked POSIX user signals for thread t, scanning
 * from RTL_MAX_SIGNAL down to RTL_SIGUSR1.  A handler runs with the
 * signal temporarily blocked and with interrupts re-enabled for its
 * duration.  (Body continues below; split across source lines in this
 * capture.)
 */
static inline void do_user_signal(pthread_t t)
{
	int sig;
	unsigned long flags, initial_interrupt_state;
	void (*fun)(int)=NULL;
	rtl_no_interrupts(initial_interrupt_state);
	for (sig=RTL_MAX_SIGNAL;sig>=RTL_SIGUSR1;sig--){
		if (test_bit(sig,&t->pending) && !test_bit(sig,&t->blocked)){
			if (rtl_sigact[sig].owner==t){
				fun=rtl_sigact[sig].sa_handler;
				if (fun) { // Block signal being managed.
					set_bit(sig,&t->blocked);
					set_bit(RTL_SIGNAL_HANDLER_EXECUTION_INPROGRESS,&t->pending);
					/* Run the user handler with interrupts enabled,
					 * then re-disable before touching the bitmasks. */
					rtl_allow_interrupts();
					fun(sig);
					rtl_no_interrupts(flags);
					clear_bit(RTL_SIGNAL_HANDLER_EXECUTION_INPROGRESS,&t->pending);
					clear_bit(sig,&t->blocked);
					/* If the handler interrupted a waiting thread,
					 * wake it back up. */
					if (test_and_clear_bit(RTL_THREAD_SIGNAL_INTERRUMPIBLE,&t->threadflags)){
						pthread_kill(t,RTL_SIGNAL_WAKEUP);
					}
				}
				/* Force the scheduler to reprogram the one-shot timer. */
				clear_bit (RTL_SCHED_TIMER_OK, &LOCAL_SCHED->sched_flags);
			}
			/* The bit should be pending until unblocked. */
			clear_bit(sig,&t->pending);
		}
	}
	rtl_restore_interrupts(initial_interrupt_state);
}
#endif

/*
 * The scheduling pass: with interrupts disabled, expire thread timers
 * (and, with CONFIG_OC_PTIMERS, POSIX timers), pick the highest-priority
 * thread with an unblocked pending signal, find the earliest-deadline
 * preemptor among armed higher/equal-priority timers, and program the
 * one-shot clock accordingly before switching.
 *
 * NOTE(review): this function continues past the end of this capture —
 * the remainder (context switch tail, FPU handling) is truncated below.
 */
int rtl_schedule(void)
{
	schedule_t *sched;
	struct rtl_thread_struct *t;
	struct rtl_thread_struct *new_task = 0;
	struct rtl_thread_struct *preemptor = 0;
	hrtime_t preempt_time=HRTIME_INFINITY;
#ifdef CONFIG_OC_PTIMERS
	timer_t timer =0;
#endif
	unsigned long interrupt_state;
	int cpu_id = rtl_getcpuid();
	hrtime_t now;
	rtl_sigset_t mask;
idle:
	rtl_no_interrupts(interrupt_state);
	new_task = 0;
	sched = &rtl_sched[cpu_id];
	now = sched->clock->gethrtime(sched->clock);
#ifdef CONFIG_OC_PTIMERS
	/* Fire and re-arm any expired POSIX timers. */
	for (timer=get_timer_list_start();timer;timer=timer->next){
		if (timer_expiration(timer,now,sched->clock->mode)){
			expiration_notification(timer);
			UPDATE_TIMER(timer);
		}
	}
#endif
	/* Single pass over the task list: expire due thread timers, track the
	 * earliest-deadline armed timer of highest priority (the preemptor),
	 * and select the highest-priority runnable task. */
	for(t = sched->rtl_tasks; t; t = t->next){
		if(test_bit(RTL_THREAD_TIMERARMED, &t->threadflags)){
			/* expire timers and find preemptor */
			if(now >= t->resume_time){
				clear_bit(RTL_THREAD_TIMERARMED, &t->threadflags);
				rtl_sigaddset(&t->pending, RTL_SIGNAL_TIMER);
			}
			/* find earliest deadline preemptor */
			else if(!preemptor || ((t->sched_param.sched_priority >= preemptor->sched_param.sched_priority) && (t->resume_time < preemptor->resume_time)) ) {
				preemptor=t;
			}
		}
		/* find highest priority runnable task */
		if ((t->pending & ~t->blocked) && (!new_task || (t->sched_param.sched_priority > new_task->sched_param.sched_priority))) {
			new_task = t;
		}
	}
	if(!new_task){
		/* Nothing runnable: briefly re-enable interrupts and retry. */
		rtl_restore_interrupts(interrupt_state);
		goto idle;
	}
	if (preemptor)
		preempt_time=preemptor->resume_time;
#ifdef
CONFIG_OC_PTIMERS for (timer=get_timer_list_start();timer;timer=timer->next){ // find preemptor. if (timer->owner->sched_param.sched_priority > new_task->sched_param.sched_priority){ if( TIMER_ARMED(timer)){ if (timer->expires.it_value < preempt_time) { preempt_time = timer->expires.it_value; } } } else { /* timer list is ordered by owner priority */ break; } } #endif /* we have the new task selected. If we have a preemptor set the * timer to to the preemptors expire time otherwise set it to * the virtual linux timer interrupt. */ if (sched->clock->mode == RTL_CLOCK_MODE_ONESHOT && !test_bit (RTL_SCHED_TIMER_OK, &sched->sched_flags)) { if(preempt_time!=HRTIME_INFINITY) /* we have a preemtor */ { (sched->clock)->settimer(sched->clock, preempt_time - now); }else{ (sched->clock)->settimer(sched->clock, (HRTICKS_PER_SEC/HZ)/2); } set_bit(RTL_SCHED_TIMER_OK, &sched->sched_flags); } /* if the new_task == the current task we do nothing but return */ if (new_task != sched->rtl_current) { /* switch out old, switch in new */ /* rtl_make_rt_system_* are only called here * cpu_id was set right at the beginnning of rtl_schedule * no need to call it again here. Swap the if / else as * the case of interest for rt is != linux . */ if (new_task != &sched->rtl_linux_task) { /* rtl_make_rt_system_active(); */ L_SET(l_busy); } else { /*rtl_make_rt_system_idle();*/ L_CLEAR(l_busy); } rtl_trace2 (RTL_TRACE_SCHED_CTX_SWITCH, (long) new_task); rtl_switch_to(&sched->rtl_current, new_task); /* delay switching the FPU context until it is really needed */#ifdef CONFIG_RTL_FP_SUPPORT if (sched->rtl_current-> uses_fp &&\
/* NOTE(review): source truncated here by the capture tool — the remainder
 * of rtl_schedule() (FPU context handling onward) and the rest of the file
 * are missing.  The lines previously here were code-viewer UI text
 * (keyboard-shortcut help), not part of rtl_sched.c. */