rtl_sched.c
            sched->rtl_task_fpu_owner != sched->rtl_current) {
            if (sched->rtl_task_fpu_owner) {
                rtl_fpu_save (sched, sched->rtl_task_fpu_owner);
            }
            rtl_fpu_restore (sched, sched->rtl_current);
            sched->rtl_task_fpu_owner = sched->rtl_current;
        }
#endif /* CONFIG_RTL_FP_SUPPORT */
    }

    mask = pthread_self()->pending;
    rtl_restore_interrupts(interrupt_state);

    /* handle any pending signals of the new task */
    if (pthread_self()->pending & ~(1 << RTL_SIGNAL_READY))
        do_signal(pthread_self());

#ifdef CONFIG_OC_PSIGNALS
    /* Signal handlers are non re-entrant, i.e. first finish current
       handler execution and then manage the rest of pending signals. */
    if (test_bit(RTL_SIGNAL_HANDLER_EXECUTION_INPROGRESS, &pthread_self()->pending))
        goto end;
    if ((pthread_self()->pending & ~pthread_self()->blocked) & RTL_THREAD_SIGNALS_MASK)
        do_user_signal(pthread_self());
#endif

    if (!rtl_sigismember(&pthread_self()->pending, RTL_SIGNAL_READY)) {
        goto idle;
    }

#ifdef CONFIG_OC_PSIGNALS
end:
#endif
    rtl_trace2 (RTL_TRACE_SCHED_OUT, (long) pthread_self());
    return mask;
}

int rtl_cpu_exists (int cpu)
{
    int n;
    int i;

    for (i = 0; i < rtl_num_cpus(); i++) {
        n = cpu_logical_map (i);
        if (n == cpu) {
            return 1;
        }
    }
    return 0;
}

int pthread_suspend_np (pthread_t thread)
{
    if (thread == pthread_self()) {
        RTL_MARK_SUSPENDED (pthread_self());
        rtl_schedule();
        pthread_testcancel();
    } else {
        pthread_kill(thread, RTL_SIGNAL_SUSPEND);
    }
    return 0;
}

int pthread_wakeup_np (pthread_t thread)
{
#ifdef CONFIG_SMP
    if (thread->cpu != rtl_getcpuid()) {
        pthread_kill(thread, RTL_SIGNAL_WAKEUP);
        rtl_reschedule_thread(thread);
    } else
#endif
    {
        pthread_kill(thread, RTL_SIGNAL_WAKEUP);
        rtl_schedule();
    }
    return 0;
}

int pthread_attr_setcpu_np(pthread_attr_t *attr, int cpu)
{
#ifdef CONFIG_SMP
    if (!rtl_cpu_exists(cpu)) {
        return EINVAL;
    }
#endif
    attr->cpu = cpu;
    return 0;
}

int pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    attr->stack_addr = stackaddr;
    return 0;
}

int pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    *stackaddr = attr->stack_addr;
    return 0;
}

static void add_to_task_list(pthread_t thread)
{
    schedule_t *s = LOCAL_SCHED;
    thread->next = s->rtl_tasks;
    s->rtl_tasks = thread;
}

int __pthread_create (pthread_t *thread, const pthread_attr_t *attr,
        void *(*start_routine)(void *), void *arg, struct module *creator)
{
    int *stack_addr;
    long interrupt_state;
    struct rtl_thread_struct *task;
    pthread_attr_t default_attr;
    int stack_size;

    if (!attr) {
        pthread_attr_init(&default_attr);
        attr = &default_attr;
    }
    stack_size = attr->stack_size;
    stack_addr = (int *) attr->stack_addr;

    task = (struct rtl_thread_struct *) kmalloc(sizeof(struct rtl_thread_struct), GFP_KERNEL);
    if (!task) {
        return EAGAIN;
    }

    if (!stack_addr) {
        if (pthread_self() != pthread_linux()) {
            kfree(task);
            return EAGAIN;
        }
        stack_addr = (int *) kmalloc(stack_size, GFP_KERNEL);
        if (!stack_addr) {
            kfree(task);
            return EAGAIN;
        }
        task->kmalloc_stack_bottom = stack_addr;
    } else {
        task->kmalloc_stack_bottom = 0;
    }

    *thread = task;

    task->threadflags = 0;
    if (attr->detachstate == PTHREAD_CREATE_JOINABLE) {
        set_bit(RTL_THREAD_JOINABLE, &task->threadflags);
    }
    task->magic = RTL_THREAD_MAGIC;
    task->creator = creator;
    task->pending = attr->initial_state;
    task->blocked = 0;
    task->abort = 0;
    task->cpu = attr->cpu;
    task->cleanup = 0;
    task->resume_time = HRTIME_INFINITY;
    task->period = 0;
    task->sched_param = attr->sched_param;
    task->stack = stack_addr + stack_size / sizeof(int);

    rtl_init_stack(task, start_routine, arg, rtl_startup);

    rtl_no_interrupts(interrupt_state);
    task->fpu_initialized = 0;
    task->uses_fp = attr->use_fp;

    {
        schedule_t *s = sched_data(task->cpu);
#ifdef CONFIG_SMP
        if (task->cpu != rtl_getcpuid()) {
            rtl_spin_lock (&s->rtl_tasks_lock);
            task->next = s->rtl_new_tasks;
            s->rtl_new_tasks = task;
            rtl_spin_unlock (&s->rtl_tasks_lock);
            rtl_reschedule (task->cpu);
        } else
#endif
        {
            rtl_spin_lock (&s->rtl_tasks_lock);
            add_to_task_list(task);
            rtl_spin_unlock (&s->rtl_tasks_lock);
            rtl_schedule();
        }
    }
    rtl_restore_interrupts(interrupt_state);
    return 0;
}

#ifdef CONFIG_RTL_FP_SUPPORT
int pthread_attr_setfp_np (pthread_attr_t *attr, int flag)
{
    attr->use_fp = flag;
    return 0;
}

int pthread_setfp_np (pthread_t thread, int flag)
{
    schedule_t *sched = LOCAL_SCHED;
    rtl_irqstate_t flags;

#ifdef CONFIG_SMP
    if (rtl_getcpuid() != thread->cpu) {
        rtl_printf("pthread_setfp_np() called on a wrong CPU!\n");
        return EINVAL;
    }
#endif
    rtl_no_interrupts(flags);
#ifdef CONFIG_RTL_FP_SUPPORT
    if (thread->uses_fp != flag) {
        thread->uses_fp = flag;
        if (flag) {
            if (thread == pthread_self()) {
                if (sched->rtl_task_fpu_owner) {
                    rtl_fpu_save (sched, sched->rtl_task_fpu_owner);
                }
                rtl_fpu_restore (sched, thread);
                sched->rtl_task_fpu_owner = thread;
            }
        }
        /* else {
            if (sched->rtl_task_fpu_owner == thread) {
                sched->rtl_task_fpu_owner = 0;
            }
        } */
    }
#endif /* CONFIG_RTL_FP_SUPPORT */
    rtl_restore_interrupts(flags);
    return 0;
}
#endif

static void rtl_sched_timer_interrupt(struct pt_regs *regs)
{
    clear_bit (RTL_SCHED_TIMER_OK, &LOCAL_SCHED->sched_flags);
    rtl_schedule();
}

int pthread_setcancelstate(int state, int *oldstate)
{
    if (oldstate) {
        *oldstate = rtl_sigismember (&pthread_self()->blocked, RTL_SIGNAL_CANCEL);
    }
    if (state) {
        set_bit (RTL_SIGNAL_CANCEL, &pthread_self()->blocked);
    } else {
        clear_bit (RTL_SIGNAL_CANCEL, &pthread_self()->blocked);
    }
    return 0;
}

int pthread_setcanceltype(int type, int *oldtype)
{
    rtl_irqstate_t flags;

    rtl_no_interrupts (flags);
    if (oldtype) {
        *oldtype = test_bit(RTL_CANCELTYPE, &pthread_self()->threadflags);
    }
    if (type) {
        set_bit (RTL_CANCELTYPE, &pthread_self()->threadflags);
    } else {
        clear_bit (RTL_CANCELTYPE, &pthread_self()->threadflags);
    }
    rtl_restore_interrupts(flags);
    return 0;
}

int pthread_cancel (pthread_t thread)
{
    if (RTL_PRIO(thread) < RTL_PRIO(pthread_linux())) {
        RTL_PRIO(thread) = RTL_PRIO(pthread_linux()) + 3;
    }
    return pthread_kill(thread, RTL_SIGNAL_CANCEL);
}

void pthread_testcancel(void)
{
    if (test_and_clear_bit(RTL_CANCELPENDING, &pthread_self()->threadflags)) {
        pthread_exit(PTHREAD_CANCELED);
    }
}

int pthread_delete_np (pthread_t thread)
{
    hrtime_t timeout = gethrtime() + 1000000000;

    clear_bit (RTL_SIGNAL_CANCEL, &thread->blocked);
    set_bit (RTL_CANCELTYPE, &thread->threadflags);
    set_bit(RTL_CANCELPENDING, &thread->threadflags);
    clear_bit(RTL_THREAD_JOINABLE, &thread->threadflags);
    /* now the cancel is enabled+asynchronous */
    pthread_cancel (thread);

    while (!test_bit (RTL_THREAD_FINISHED, &thread->threadflags)) {
        barrier();
        rtl_schedule();
        if (gethrtime() >= timeout) {
            rtl_printf("timed out in pthread_delete_np!\n");
            break;
        }
    }
    return 0;
}

#ifdef RTL_DEBUG
void rtl_dump_tasks(void)
{
    schedule_t *sched = LOCAL_SCHED;
    struct rtl_thread_struct *t;
    hrtime_t ts = sched->clock->gethrtime(sched->clock);

    rtl_printf("Tasks on CPU %d time = (%9d)\n", rtl_getcpuid(), ts);
    for (t = sched->rtl_tasks; t; t = t->next) {
        if (t == &sched->rtl_linux_task) {
            rtl_printf("linux task ");
        }
        rtl_printf("addr=%08x state=%04x i=(%9d) p=(%9d)\n", t, t->state, t->resume_time, t->period);
    }
}
#endif

int pthread_wait_np(void)
{
    long interrupt_state;
    pthread_t self = pthread_self();

    rtl_no_interrupts(interrupt_state);
    RTL_MARK_SUSPENDED (self);
    __rtl_setup_timeout (self, self->resume_time);
    self->resume_time += self->period;
    rtl_schedule();
    pthread_testcancel();
    rtl_restore_interrupts(interrupt_state);
    return 0;
}

#ifdef CONFIG_SMP
static unsigned int resched_irq(struct pt_regs *r)
{
    pthread_t t;
    pthread_t nexttask;

/*  rtl_printf(" r on cpu %d; rtl_new_tasks = %x\n", rtl_getcpuid(), LOCAL_SCHED->rtl_new_tasks); */
    rtl_spin_lock (&LOCAL_SCHED->rtl_tasks_lock);
    for (t = LOCAL_SCHED->rtl_new_tasks; t; t = nexttask) {
        nexttask = t->next;
        add_to_task_list(t); /* modifies t->next */
    }
    LOCAL_SCHED->rtl_new_tasks = 0;
    rtl_spin_unlock (&LOCAL_SCHED->rtl_tasks_lock);
    rtl_schedule();
    return 0;
}
#endif /* CONFIG_SMP */

static void sched_irq_handler (int irq, void *dev_id, struct pt_regs *p)
{
    rtl_irqstate_t flags;

    rtl_spin_lock_irqsave(&rtl_tqueue_lock, flags);
    while (zombie_threads) {
        pthread_t th = zombie_threads;
        zombie_threads = zombie_threads->next;
        rtl_spin_unlock_irqrestore(&rtl_tqueue_lock, flags);
        rtl_posix_on_delete(th);
        if (th->kmalloc_stack_bottom) {
            rtl_schedule_task ((struct tq_struct *) th->cleanup);
        }
        rtl_spin_lock_irqsave(&rtl_tqueue_lock, flags);
    }
    rtl_spin_unlock_irqrestore(&rtl_tqueue_lock, flags);
}

static int *thread_errno_location(void)
{
    return &(RTL_CURRENT->errno_val);
}

static int *(*save_errno_location)(void);

int init_module(void)
{
    rtl_irqstate_t interrupt_state;
    int i;
    int ret;
    int my_cpu_id;
    schedule_t *s;
    unsigned int cpu_id = rtl_getcpuid();

#ifdef CONFIG_OC_PTIMERS
    rtl_spin_lock_init(&rtl_timer_list_lock);
#endif
    rtl_spin_lock_init (&rtl_tqueue_lock);
    zombie_threads = 0;

    ret = rtl_get_soft_irq (sched_irq_handler, "RTLinux Scheduler");
    if (ret > 0) {
        rtl_sched_irq = ret;
    } else {
        rtl_printf ("Can't get an irq for RTLinux scheduler");
        return -EINVAL;
    }

    rtl_no_interrupts(interrupt_state);
    my_cpu_id = cpu_id;
    for (i = 0; i < rtl_num_cpus(); i++) {
        cpu_id = cpu_logical_map (i);
        s = &rtl_sched[cpu_id];
        s->rtl_current = &s->rtl_linux_task;
        s->rtl_tasks = &s->rtl_linux_task;
        s->rtl_new_tasks = 0;
        rtl_spin_lock_init (&s->rtl_tasks_lock);
        s->rtl_linux_task.magic = RTL_THREAD_MAGIC;
        rtl_sigemptyset(&s->rtl_linux_task.pending);
        rtl_sigaddset(&s->rtl_linux_task.pending, RTL_SIGNAL_READY);
        s->rtl_linux_task.blocked = 0;
        s->rtl_linux_task.threadflags = 0;
        s->rtl_linux_task.sched_param.sched_priority = -1;
        s->rtl_linux_task.next = 0;
        s->rtl_linux_task.uses_fp = 1;
        s->rtl_linux_task.fpu_initialized = 1;
        s->rtl_linux_task.creator = 0;
        s->rtl_linux_task.abort = 0;
        s->rtl_task_fpu_owner = &s->rtl_linux_task;
        s->sched_flags = 0;
        rtl_posix_init (&s->rtl_linux_task);

        s->clock = rtl_getbestclock (cpu_id);
        if (s->clock && rtl_setclockhandler (s->clock, rtl_sched_timer_interrupt) == 0) {
            s->clock->init(s->clock);
        } else {
            rtl_printf("Can't get a clock for processor %d\n", cpu_id);
            rtl_restore_interrupts (interrupt_state);
            return -EINVAL;
        }
    }
    cpu_id = my_cpu_id;

#ifdef CONFIG_SMP
    for (i = 0; i < rtl_num_cpus(); i++) {
        int cpu;
        int ret;
        cpu = cpu_logical_map (i);
        s = &rtl_sched[cpu];
        ret = rtl_request_ipi(resched_irq, cpu);
    }
#endif
    rtl_restore_interrupts (interrupt_state);

    save_errno_location = __errno_location_ptr;
    __errno_location_ptr = thread_errno_location;
/*  rtl_setdebug (RTLDBG_ALL); */
    return 0;
}

void cleanup_module(void)
{
    int i;
    int cpu;
    schedule_t *s;

    __errno_location_ptr = save_errno_location;
    rtl_free_soft_irq(rtl_sched_irq);

    for (i = 0; i < rtl_num_cpus(); i++) {
        cpu = cpu_logical_map (i);
        s = &rtl_sched[cpu];
        s->clock->uninit(s->clock);
#ifdef CONFIG_SMP
        rtl_free_ipi(cpu);
#endif
    }
}

int usleep (useconds_t interval)
{
    rtl_irqstate_t flags;
    pthread_t th = pthread_self();
    hrtime_t save_resume_time;

    rtl_no_interrupts (flags);
    save_resume_time = th->resume_time;
    RTL_MARK_SUSPENDED(th);
    __rtl_setup_timeout (th, gethrtime() + (long long) interval * 1000LL);
    rtl_schedule();
    pthread_testcancel();
    th->resume_time = save_resume_time;
    rtl_restore_interrupts (flags);
    return 0;
}

int clock_nanosleep(clockid_t clock_id, int flags,
        const struct timespec *rqtp, struct timespec *rmtp)
{
    hrtime_t timeout;
    rtl_irqstate_t irqstate;
    hrtime_t save_resume_time;
    pthread_t self = pthread_self();
    int ret;

    if (rqtp == (const struct timespec *) &self->timeval) {
        timeout = self->timeval;
    } else {
        timeout = timespec_to_ns(rqtp);
    }

    rtl_no_interrupts (irqstate);
    if (!(flags & TIMER_ABSTIME)) {
        timeout += clock_gethrtime(CLOCK_RTL_SCHED);
    } else {
        timeout = __rtl_fix_timeout_for_clock(clock_id, timeout);
    }
    save_resume_time = self->resume_time;
#ifdef CONFIG_OC_PSIGNALS
    set_bit(RTL_THREAD_SIGNAL_INTERRUMPIBLE, &self->threadflags);
#endif
    RTL_MARK_SUSPENDED(self);
    __rtl_setup_timeout (self, timeout);
    ret = rtl_schedule();
    pthread_testcancel();

    if (RTL_TIMED_OUT(&ret)) {
        self->resume_time = save_resume_time;
#ifdef CONFIG_OC_PSIGNALS
        clear_bit(RTL_THREAD_SIGNAL_INTERRUMPIBLE, &self->threadflags);
#endif
        rtl_restore_interrupts (irqstate);
        return 0;
    }

    /* interrupted by a signal */
    if (!(flags & TIMER_ABSTIME) && rmtp) {
        *rmtp = timespec_from_ns(self->resume_time - clock_gethrtime(CLOCK_RTL_SCHED));
    }
    self->resume_time = save_resume_time;
    rtl_restore_interrupts (irqstate);
    return EINTR;
}

int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
{
    int ret;

    if ((ret = clock_nanosleep(CLOCK_RTL_SCHED, 0, rqtp, rmtp))) {
        errno = ret;
        return -1;
    }
    return 0;
}
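For reference, the thread API implemented above is normally driven from a separate RTLinux kernel module. The following is a minimal usage sketch, not part of rtl_sched.c: it assumes the standard RTLinux headers (rtl.h, rtl_sched.h) and pthread_make_periodic_np() from those headers, which arms resume_time/period so that pthread_wait_np() above suspends the thread until its next period. The names demo_thread and demo_code are placeholders for illustration.

/* demo_module.c -- minimal sketch of using the scheduler API above */
#include <rtl.h>
#include <rtl_sched.h>
#include <pthread.h>
#include <time.h>

static pthread_t demo_thread;

static void *demo_code(void *arg)
{
    /* assumed from the RTLinux headers: make this thread periodic,
       running once every 500 ms starting now */
    pthread_make_periodic_np(pthread_self(), gethrtime(), 500000000LL);
    while (1) {
        pthread_wait_np();              /* suspend until the next period */
        rtl_printf("periodic tick\n");
    }
    return 0;
}

int init_module(void)
{
    pthread_attr_t attr;
    struct sched_param param;

    pthread_attr_init(&attr);
    param.sched_priority = 1;
    pthread_attr_setschedparam(&attr, &param);
#ifdef CONFIG_RTL_FP_SUPPORT
    pthread_attr_setfp_np(&attr, 1);    /* save/restore FPU state for this thread */
#endif
    /* NULL stack address: __pthread_create() kmallocs the stack */
    if (pthread_create(&demo_thread, &attr, demo_code, (void *) 0) != 0) {
        return -EAGAIN;
    }
    return 0;
}

void cleanup_module(void)
{
    pthread_delete_np(demo_thread);     /* cancel and reap the thread */
}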