/*
 * arch/arm/rtai.c
 *
 * COPYRIGHT (C) 2001 Paolo Mantegazza (mantegazza@aero.polimi.it)
 * COPYRIGHT (C) 2001 Alex Züpke, SYSGO RTS GmbH (azu@sysgo.de)
 * COPYRIGHT (C) 2002 Wolfgang Müller (wolfgang.mueller@dsa-ac.de)
 * COPYRIGHT (C) 2002 Guennadi Liakhovetski, DSA GmbH (gl@dsa-ac.de)
 * COPYRIGHT (C) 2002 Thomas Gleixner (gleixner@autronix.de)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*--------------------------------------------------------------------------
Changelog
03-10-2002	TG	added support for trap handling.
			added function rt_is_linux.
11-07-2003	GL	First of a series of fixes, proposed by Thomas Gleixner.
			Disable interrupts on return from dispatch_[irq|srq]().
12-07-2003	GL	Move closer to Linux. Remove re-declaration of irq_desc,
			replace IBIT with Linux' I_BIT, remove unneeded
			rtai_irq_t type.
21-07-2003	GL	Fix a race in linux_sti(), created by making linux_sti()
			re-entrant.
28-07-2003	GL	Improve handling of pending interrupts to Linux, put
			debugging BUG() statement, idea of which belongs to
			Thomas Gleixner.
29-07-2003	GL	Initial support for 2.4.19-rmk7 for StrongARM.
28-08-2003	GL	Use native Linux masking for linux-only interrupts.
Handle unmasking of RTAI-Linux shared interrupts, deliver them, if new ones arrived, while the interrupt line was masked by Linux.*/#include <linux/config.h>#include <linux/module.h>#include <linux/sched.h>#include <linux/interrupt.h>#include <linux/init.h>#include <linux/compiler.h>#include <asm/system.h>#include <asm/smp.h>#include <asm/io.h>#include <linux/bitops.h>#include <asm/atomic.h>#ifdef CONFIG_PROC_FS#include <linux/stat.h>#include <linux/proc_fs.h>#include <rtai_proc_fs.h>#endif#include <asm/rtai.h>#include <asm/rtai_srq.h>#include <rtai_version.h>#include <rtai_trace.h>#undef CONFIG_RTAI_MOUNT_ON_LOAD// proc filesystem additions.#ifdef CONFIG_PROC_FSstatic int rtai_proc_register(void);static void rtai_proc_unregister(void);#endif// End of proc filesystem additions./* Some define */#define NR_SYSRQS 32#define NR_TRAPS 32/* these are prototypes for timer-handling abstraction */extern void linux_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);extern void soft_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);struct global_rt_status { volatile unsigned int used_by_linux; volatile unsigned int locked_cpus; volatile int irq_in, irq_out, lost_irqs; volatile rtai_irq_mask_t pending_srqs; volatile rtai_irq_mask_t active_srqs; struct list_head pending_linux_irq; spinlock_t data_lock; spinlock_t ic_lock;};static struct global_rt_status global __attribute__ ((aligned(32)));volatile unsigned int *locked_cpus = &global.locked_cpus;/* VERY IMPORTANT, since I saw no way to just ack, we mask_ack always, so *//* it is likely we have to recall to set an arch dependent call to unmask *//* in the scheduler timer handler. Other arch allow just to ack, maybe we'll *//* we can get along as it is now, let's recall this point. 
*/#include <asm/mach/irq.h>extern struct irqdesc irq_desc[];#include <asm/rtai_irqops.h>/* Most of our data */static struct irq_handling { void (*handler)(int irq, void *dev_id, struct pt_regs *regs); void *dev_id; unsigned long count;} global_irq[NR_IRQS] __attribute__ ((__aligned__(32)));static struct irqdesc shadow_irq_desc[NR_IRQS];static struct sysrq_t { unsigned int label; void (*rtai_handler)(void); long long (*user_handler)(unsigned int whatever);} sysrq[NR_SYSRQS];static RT_TRAP_HANDLER rtai_trap_handler[NR_TRAPS];// The main items to be saved-restored to make Linux our humble slavestatic struct rt_hal linux_rthal;static struct pt_regs rtai_regs; // Dummy registers.static void *saved_timer_action_handler; // Saved timer-action handlerstatic struct cpu_own_status { volatile unsigned int intr_flag; volatile unsigned int linux_intr_flag; volatile rtai_irq_mask_t pending_irqs; volatile rtai_irq_mask_t active_irqs;} processor[NR_RT_CPUS];void send_ipi_shorthand(unsigned int shorthand, unsigned int irq) { }void send_ipi_logical(unsigned long dest, unsigned int irq) { }//static void hard_lock_all_handler(void) { }volatile union rtai_tsc rtai_tsc;static void linux_cli(void){ processor[hard_cpu_id()].intr_flag = 0;}static void (*ic_mask_ack_irq[NR_IRQS])(unsigned int irq);static void (*ic_mask_irq[NR_IRQS])(unsigned int irq);static void (*ic_unmask_irq[NR_IRQS])(unsigned int irq);/** * linux_sti() must be re-entrant. */static void linux_sti(void){ unsigned int irq; unsigned long flags; unsigned long cpuid; struct cpu_own_status *cpu; cpu = processor + (cpuid = hard_cpu_id()); hard_save_flags(flags); if (unlikely(flags & I_BIT)) { hard_sti(); BUG(); } rt_spin_lock_irq(&(global.data_lock)); while (have_pending_irq() != NO_IRQ || have_pending_srq()) { cpu->intr_flag = 0; /* cli */ while ((irq = have_pending_irq()) != NO_IRQ) { struct irqdesc *desc = irq_desc + irq;#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,19) if (desc->pending || desc->disable_depth || ! 
list_empty(&desc->pend)) { printk(KERN_CRIT "IRQ %d: pending %d, disable_depth %d, running %d, list %sempty\n", irq, desc->pending, desc->disable_depth, desc->running, list_empty(&desc->pend) ? "" : "not "); BUG(); }#else if (! desc->enabled) { printk(KERN_CRIT "IRQ %d: disabled\n", irq); BUG(); }#endif clear_pending_irq(irq); /* Emulate Linux behaviour, i.e. serve multiplexed interrupts 1 at a time */ if (isdemuxirq(irq)) irq_desc[ARCH_MUX_IRQ].running = 1; rt_spin_unlock_irq(&(global.data_lock)); // ** call old Linux do_IRQ() to handle IRQ linux_rthal.do_IRQ(irq, &rtai_regs); /* Unmasking is done in do_IRQ above - don't do twice */ rt_spin_lock_irq(&(global.data_lock)); if (isdemuxirq(irq)) irq_desc[ARCH_MUX_IRQ].running = 0; } // Local IRQ Handling - missing here ... only on SMP cpu->intr_flag = I_BIT | (1 << cpuid); // sti() if (have_pending_srq()) { // SRQ Handling - same as on PPC irq = pending_srq(); activate_srq(irq); clear_pending_srq(irq); rt_spin_unlock_irq(&(global.data_lock)); if (sysrq[irq].rtai_handler) { sysrq[irq].rtai_handler(); } rt_spin_lock_irq(&(global.data_lock)); deactivate_srq(irq); } } rt_spin_unlock_irq(&(global.data_lock)); cpu->intr_flag = I_BIT | (1 << cpuid);}/* we need to return faked, but real flags * * imagine a function calling our linux_save_flags() while rtai is loaded * and restoring flags when rtai is unloaded. CPSR is broken ... */static unsigned int linux_save_flags(void){ unsigned long flags; hard_save_flags( flags ); /* check if we are in CLI, then set I bit in flags */ return (flags & ~I_BIT) | ( processor[hard_cpu_id()].intr_flag ? 
0 : I_BIT );}static void linux_restore_flags(unsigned int flags){ /* check if CLI-bit is set */ if (flags & I_BIT) { processor[hard_cpu_id()].intr_flag = 0; } else { linux_sti(); }}#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,21)static unsigned int linux_save_flags_sti(void){ unsigned long flags; flags = linux_save_flags(); linux_sti(); return flags;}#endifunsigned int linux_save_flags_and_cli(void){ unsigned long flags; flags = linux_save_flags(); processor[hard_cpu_id()].intr_flag = 0; return flags;}unsigned long linux_save_flags_and_cli_cpuid(int cpuid){ return linux_save_flags_and_cli();}void rtai_just_copy_back(unsigned long flags, int cpuid){ /* also check if CLI-bit is set and set up intr_flag accordingly */ if (flags & I_BIT) { processor[cpuid].intr_flag = 0; } else { processor[cpuid].intr_flag = I_BIT | (1 << cpuid); }}// For the moment we just do mask_ack_unmask, maybe it has to be adjustedstatic void do_nothing_picfun(unsigned int irq) { }static void linux_mask_ack(unsigned int irq){ irq_desc[irq].masked = 1;}unsigned int rt_startup_irq(unsigned int irq){ unsigned int flags; struct irqdesc *irq_desc; if ((irq_desc = &shadow_irq_desc[irq])/* && irq_desc->unmask*/) { flags = rt_spin_lock_irqsave(&global.ic_lock); irq_desc->probing = 0; irq_desc->triggered = 0;#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) irq_desc->enabled = 1;#else irq_desc->disable_depth = 0;#endif irq_desc->unmask(irq); rt_spin_unlock_irqrestore(flags, &global.ic_lock); } return 0;}void rt_shutdown_irq(unsigned int irq){ unsigned int flags; struct irqdesc *irq_desc; if ((irq_desc = &shadow_irq_desc[irq])) { flags = rt_spin_lock_irqsave(&global.ic_lock);#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) irq_desc->enabled = 0;#else irq_desc->disable_depth = (unsigned int)-1;#endif irq_desc->mask(irq); rt_spin_unlock_irqrestore(flags, &global.ic_lock); }}void rt_enable_irq(unsigned int irq){ unsigned int flags; struct irqdesc *irq_desc; if ((irq_desc = &shadow_irq_desc[irq])) { flags = 
rt_spin_lock_irqsave(&global.ic_lock);#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) irq_desc->probing = 0; irq_desc->triggered = 0; irq_desc->enabled = 1; ic_unmask_irq[irq](irq);#else if ( ! irq_desc->disable_depth ) { printk("enable_irq(%u) unbalanced from %p\n", irq, __builtin_return_address(0)); } else if ( ! --irq_desc->disable_depth ) { irq_desc->probing = 0; ic_unmask_irq[irq](irq); }#endif rt_spin_unlock_irqrestore(flags, &global.ic_lock); }}void rt_disable_irq(unsigned int irq){ unsigned int flags; struct irqdesc *irq_desc; if ((irq_desc = &shadow_irq_desc[irq])) { flags = rt_spin_lock_irqsave(&global.ic_lock);#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) irq_desc->enabled = 0; ic_mask_irq[irq](irq);#else if ( ! irq_desc->disable_depth++ ) ic_mask_irq[irq](irq);#endif rt_spin_unlock_irqrestore(flags, &global.ic_lock); }}void rt_mask_ack_irq(unsigned int irq){ unsigned int flags; flags = rt_spin_lock_irqsave(&global.ic_lock); ic_mask_ack_irq[irq](irq); rt_spin_unlock_irqrestore(flags, &global.ic_lock);}void rt_mask_irq(unsigned int irq){ unsigned int flags; flags = rt_spin_lock_irqsave(&global.ic_lock); ic_mask_irq[irq](irq); rt_spin_unlock_irqrestore(flags, &global.ic_lock);}void rt_unmask_irq(unsigned int irq){ unsigned int flags; flags = rt_spin_lock_irqsave(&global.ic_lock); ic_unmask_irq[irq](irq); rt_spin_unlock_irqrestore(flags, &global.ic_lock);}/* * A real time handler must unmask ASAP. Especially important for the timer * ISR. When RTAI is mounted, this should be done in the macro * DO_TIMER_PROPER_OP(), called on entry to the rt_timer_handler(). 
*/asmlinkage void dispatch_irq(int irq, struct pt_regs *regs){ rt_spin_lock(&global.ic_lock); if (irq >= 0 && irq < NR_IRQS) { ic_mask_ack_irq[irq](irq); irq_desc[irq].masked = 0; rt_spin_unlock(&global.ic_lock); TRACE_RTAI_GLOBAL_IRQ_ENTRY(irq, !user_mode(regs)); // We just care about our own RT-Handlers installed if (global_irq[irq].handler) { /* If this interrupt happened in Linux, in the rt-handler below we might context-switch to rt, and then back to Linux, at which point linux_sti() might be called - which is a bit early yet. Prevent it. */ unsigned long flags = linux_save_flags_and_cli(); global_irq[irq].count++; global_irq[irq].handler(irq, global_irq[irq].dev_id, regs); rtai_just_copy_back(flags, hard_cpu_id()); rt_spin_lock_irq(&(global.data_lock)); /* The timer interrupt should be unmasked in the handler, due to RTAI's prioritisation of RT-tasks. Linux has a handler-flag to mark handlers, that want their interrupts to be unmasked immediately (hint). */ ic_unmask_irq[irq](irq); } else { rt_spin_lock(&(global.data_lock)); rt_pend_linux_irq(irq); } if (! 
isdemuxirq(irq) && (global.used_by_linux & processor[hard_cpu_id()].intr_flag)) { linux_cli(); rt_spin_unlock_irq(&(global.data_lock)); linux_sti(); } else { rt_spin_unlock(&(global.data_lock)); } TRACE_RTAI_GLOBAL_IRQ_EXIT(); } else { rt_spin_unlock(&global.ic_lock); printk(KERN_ERR "RTAI-IRQ: spurious interrupt 0x%02x\n", irq); } hard_cli();}#define MIN_IDT_VEC 0xF0#define MAX_IDT_VEC 0xFFstatic unsigned long long (*idt_table[MAX_IDT_VEC - MIN_IDT_VEC + 1])(unsigned int srq, unsigned long name);asmlinkage long long dispatch_srq(int srq, unsigned long whatever){ unsigned long vec; long long retval = -1; if (!(vec = srq >> 24)) { TRACE_RTAI_SRQ_ENTRY(srq, 0); if (srq > 1 && srq < NR_SYSRQS && sysrq[srq].user_handler) { retval = sysrq[srq].user_handler(whatever); } else { for (srq = 2; srq < NR_SYSRQS; srq++) { if (sysrq[srq].label == whatever) { retval = srq; } } } TRACE_RTAI_SRQ_EXIT(); } else { if ((vec >= MIN_IDT_VEC) && (vec <= MAX_IDT_VEC) && (idt_table[vec - MIN_IDT_VEC])) { TRACE_RTAI_SRQ_ENTRY(srq, 0); retval = idt_table[vec - MIN_IDT_VEC](srq & 0xFFFFFF, whatever); TRACE_RTAI_SRQ_EXIT(); } else { printk("RTAI SRQ DISPATCHER: bad srq (0x%0x)\n", (int) vec); } } hard_cli(); return retval;}struct desc_struct rt_set_full_intr_vect(unsigned int vector, int type, int dpl, void *handler){ struct desc_struct fun = { 0 }; if (vector >= MIN_IDT_VEC && vector <= MAX_IDT_VEC) { fun.fun = idt_table[vector - MIN_IDT_VEC]; idt_table[vector - MIN_IDT_VEC] = handler; if (!rthal.do_SRQ) { rthal.do_SRQ = dispatch_srq; } } return fun;}void rt_reset_full_intr_vect(unsigned int vector, struct desc_struct idt_element){ if (vector >= MIN_IDT_VEC && vector <= MAX_IDT_VEC) { idt_table[vector - MIN_IDT_VEC] = idt_element.fun; }}/* * Dispatch Traps like Illegal instruction, .... 
* Keep call compatible to x386 */asmlinkage int dispatch_trap(int vector, struct pt_regs *regs){ if ( (vector < NR_TRAPS) && (rtai_trap_handler[vector]) ) return rtai_trap_handler[vector](vector, vector, regs, NULL); return 1; /* Let Linux do the job */}RT_TRAP_HANDLER rt_set_rtai_trap_handler(int trap, RT_TRAP_HANDLER handler){ RT_TRAP_HANDLER old_handler = NULL; if (trap < NR_TRAPS) { old_handler = rtai_trap_handler[trap];
		/* NOTE(review): the source capture is truncated here, inside
		   rt_set_rtai_trap_handler(); the remainder of this function
		   and the rest of rtai.c (mount/unmount, proc support, etc.)
		   must be restored from the original RTAI tree. The trailing
		   website navigation text that followed has been removed. */