neighbour.c
字號:
/*
 * neighbour.c — a standalone ("my"-prefixed) re-implementation of the
 * kernel neighbour (ARP) cache: /proc seq_file iterators, per-CPU
 * statistics, entry allocation, forced GC, hash-table growth and the
 * resolution-retry timer.
 *
 * linqianghe@163.com, 2006-09-26
 */
#include "neighbour.h"
#include "log.h"
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <net/dst.h>

/* Proxy-neighbour hash is a fixed 16-bucket table. */
#define PNEIGH_HASHMASK		0xF

struct neigh_table myarp_tbl;

static DEFINE_RWLOCK( myneigh_tbl_lock );
static struct neigh_table *myneigh_tables;

#ifdef CONFIG_PROC_FS

/*
 * Return the first neighbour visible to this seq_file iteration,
 * scanning buckets from 0 and honouring NEIGH_SEQ_SKIP_NOARP and any
 * protocol sub-iterator.  Caller holds tbl->lock (see myneigh_seq_start).
 */
static struct neighbour *myneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			/* skip entries that are purely NUD_NOARP */
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

/*
 * Advance to the next visible neighbour after @n, walking the current
 * chain and then subsequent buckets.  When @pos is non-NULL it is
 * decremented for each entry returned (used by myneigh_get_idx to seek).
 */
static struct neighbour *myneigh_get_next(struct seq_file *seq,
					  struct neighbour *n,
					  loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);

		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);

				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}

/* Seek to the (*pos)'th visible neighbour, or NULL if out of range. */
static struct neighbour *myneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = myneigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = myneigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

/*
 * First proxy-neighbour entry; marks the iteration state as being in the
 * pneigh phase so myneigh_seq_next knows which "next" function to call.
 */
static struct pneigh_entry *mypneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

/* Next proxy-neighbour entry after @pn; decrements *pos when non-NULL. */
static struct pneigh_entry *mypneigh_get_next( struct seq_file *seq,
					       struct pneigh_entry *pn,
					       loff_t *pos )
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

/* Seek to the (*pos)'th proxy-neighbour entry, or NULL if out of range. */
static struct pneigh_entry *mypneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = mypneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = mypneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

/*
 * Seek across both tables: neighbours first, then (unless the caller set
 * NEIGH_SEQ_NEIGH_ONLY) the proxy-neighbour table.
 */
static void *myneigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = myneigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = mypneigh_get_idx(seq, pos);

	return rc;
}

/*
 * seq_file ->start: initialise iteration state and take tbl->lock
 * (released in myneigh_seq_stop).  *pos == 0 yields SEQ_START_TOKEN so
 * ->show can print a header line.
 */
void *myneigh_seq_start(struct seq_file *seq, loff_t *pos,
			struct neigh_table *tbl,
			unsigned int neigh_seq_flags)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	/* *pos counts the header token, hence the -1 when seeking */
	pos_minus_one = *pos - 1;
	return *pos ? myneigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL( myneigh_seq_start );

/*
 * seq_file ->next: step to the next record, switching from the neighbour
 * table to the proxy table when the former is exhausted.
 */
void *myneigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = myneigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = myneigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = mypneigh_get_first(seq);
	} else {
		/* pneigh phase is unreachable when NEIGH_ONLY was requested */
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = mypneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL_GPL( myneigh_seq_next );

/* seq_file ->stop: drop the table lock taken in myneigh_seq_start. */
void myneigh_seq_stop(struct seq_file *seq, void *v)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL_GPL( myneigh_seq_stop );

/*
 * /proc statistics iterator: SEQ_START_TOKEN first, then one record per
 * possible CPU.  *pos is kept at cpu+1 so ->next resumes correctly.
 */
static void *myneigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

/* Advance to the next possible CPU's statistics record. */
static void *myneigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

/* Nothing to release: the stat iterator takes no locks. */
static void myneigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

/* Print the header line, or one per-CPU statistics row. */
static int myneigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows"
				" lookups hits res_failed rcv_probes_mcast"
				" rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );
	return 0;
}

static struct seq_operations myneigh_stat_seq_ops = {
	.start	= myneigh_stat_seq_start,
	.next	= myneigh_stat_seq_next,
	.stop	= myneigh_stat_seq_stop,
	.show	= myneigh_stat_seq_show,
};

/* ->open: attach the proc_dir_entry so ->show can find the table. */
static int myneigh_stat_seq_open( struct inode *inode, struct file *file )
{
	int ret = seq_open(file, &myneigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}; /* NOTE(review): stray ';' after the function body — harmless, should be removed */

static struct file_operations myneigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = myneigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

/* Fallback ->output for unresolved entries: drop the packet. */
static int myneigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

/* Free a bucket array allocated by myneigh_hash_alloc (kmalloc or pages). */
static void myneigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

/*
 * Forced garbage collection: unlink and release every entry that is
 * unreferenced (refcnt == 1) and not NUD_PERMANENT.  Returns 1 if
 * anything was reclaimed.  Takes tbl->lock for writing.
 */
static int myneigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC( tbl, forced_gc_runs );

	write_lock_bh(&tbl->lock);
	for( i = 0; i <= tbl->hash_mask; i++ ){
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			write_lock( &n->lock );
			if( atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT) ){
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock( &n->lock );
				myneigh_release( n );
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;
	write_unlock_bh(&tbl->lock);

	return shrunk;
}

/*
 * Probe budget: in NUD_PROBE only unicast probes count; while resolving
 * (NUD_INCOMPLETE) unicast + app + multicast probes are all allowed.
 */
static __inline__ int myneigh_max_probes( struct neighbour *n )
{
	struct neigh_parms *p = n->parms;

	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

/*
 * Per-entry resolution timer.  Re-sends solicitations while the entry is
 * NUD_INCOMPLETE/NUD_PROBE and fails the entry once the probe budget is
 * exhausted, reporting errors for any queued packets.
 */
static void myneigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if( !(state & NUD_IN_TIMER) ){
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if( state & NUD_REACHABLE ){
		;	/* nothing to do; fire again in HZ */
	}else if( state & NUD_DELAY ){
		;	/* likewise */
	}else{
		next = now + neigh->parms->retrans_time;
	}
	PR_DEBUG( "net time: %lu\n", next );

	if( (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= myneigh_max_probes(neigh) ){
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		PR_ERR("neigh %p is failed.\n", neigh);
		/*
		 * NOTE(review): releasing here while still holding (and later
		 * re-taking) neigh->lock, and before the final release at the
		 * bottom of this handler, looks like a refcount balancing hack —
		 * the author's own //TMPCODE FIXME!! marker agrees.  Verify the
		 * refcount pairing before trusting this path.
		 */
		myneigh_release( neigh );	//TMPCODE FIXME!!

		/* flush queued packets through the error-report callback */
		while( neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL ){
			write_unlock(&neigh->lock);
			neigh->ops->error_report( neigh, skb );
			/*
			 * NOTE(review): error_report normally consumes the skb;
			 * an extra dev_put here risks a device refcount
			 * underflow — flagged TMPCODE FIXME by the author.
			 */
			dev_put( skb->dev );	//TMPCODE FIXME!!
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if( time_before(next, jiffies + HZ/2) )
			next = jiffies + HZ/2;
		/* mod_timer() == 0 means the timer was inactive: take a ref */
		if( !mod_timer(&neigh->timer, next) )
			neigh_hold(neigh);
		PR_DEBUG( "neigh: %d\n", atomic_read( &neigh->refcnt ) );
		PR_DEBUG( "next time: %lu\n", next );
	}
	if( neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE) ){
		struct sk_buff *skb = skb_peek( &neigh->arp_queue );

		/* hold the skb across the unlocked solicit call */
		if( skb )
			skb_get(skb);
		write_unlock( &neigh->lock );
		neigh->ops->solicit( neigh, skb );
		atomic_inc( &neigh->probes );
		if( skb )
			kfree_skb(skb);
	}else{
out:
		write_unlock(&neigh->lock);
	}
	PR_DEBUG( "neigh refcnt: %d\n", atomic_read(&neigh->refcnt) );
	/* drop the reference the (expired) timer held */
	myneigh_release(neigh);
}

/*
 * Allocate and minimally initialise a neighbour entry.  May trigger
 * forced GC when the table is over its thresholds; returns NULL on
 * failure.  The entry starts dead (n->dead = 1) until it is hashed in.
 */
static struct neighbour *myneigh_alloc( struct neigh_table *tbl )
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return( &tbl->entries ) - 1;
	PR_DEBUG( "the entryies alrady in tbl: %d\n", entries );
	if( entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ)) ){
		if( !myneigh_forced_gc( tbl ) && entries >= tbl->gc_thresh3 )
			goto out_entries;
	}

	n = kmem_cache_alloc( tbl->kmem_cachep, SLAB_ATOMIC );
	if( !n )
		goto out_entries;

	memset( n, 0, tbl->entry_size );

	skb_queue_head_init( &n->arp_queue );
	rwlock_init( &n->lock );
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = myneigh_blackhole;	/* drop packets until resolved */
	n->parms = myneigh_parms_clone(&tbl->parms);
	init_timer( &n->timer );
	n->timer.function = myneigh_timer_handler;
	n->timer.data = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;
out_entries:
	atomic_dec( &tbl->entries );
	goto out;
}

/*
 * Allocate a zeroed bucket array: kmalloc for <= one page, otherwise
 * whole pages.  Mirrored by myneigh_hash_free.
 */
static struct neighbour **myneigh_hash_alloc( unsigned int entries )
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kmalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
			__get_free_pages(GFP_ATOMIC, get_order(size));
	}
	if (ret)
		memset(ret, 0, size);

	return ret;
}

/*
 * Grow the hash table to @new_entries buckets (must be a power of two),
 * re-seeding hash_rnd and rehashing every entry.  Caller must hold
 * tbl->lock for writing (entries are relinked in place).
 */
static void myneigh_hash_grow( struct neigh_table *tbl,
			       unsigned long new_entries )
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	PR_DEEP_DEBUG( "grow the tbl hash to size: %lu\n", new_entries );
	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON( new_entries & (new_entries - 1) );
	new_hash = myneigh_hash_alloc( new_entries );
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for( i = 0; i < old_entries; i++ ){
		struct neighbour *n, *next;

		for( n = old_hash[i]; n; n = next ){
			unsigned int hash_val = tbl->hash( n->primary_key, n->dev );

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	myneigh_hash_free( old_hash, old_entries );
}

/*
 * Look up a neighbour by (pkey, dev), taking a reference on a hit.
 * Holds tbl->lock for reading during the bucket walk.
 */
struct neighbour *myneigh_lookup(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	PR_DEEP_DEBUG( "the pkey is %u.%u.%u.%u, the hash value is: %u\n",
		       NIPQUAD( *((u32 *)pkey) ), hash_val );
	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh( &tbl->lock );
	for( n = tbl->hash_buckets[hash_val]; n; n = n->next ){
		if( dev == n->dev && !memcmp(n->primary_key, pkey, key_len) ){
			neigh_hold( n );
/* NOTE(review): SOURCE is truncated here, mid-function — the remainder of
 * myneigh_lookup (hit accounting, loop exit, read_unlock_bh, return) is not
 * visible in this chunk and has not been reconstructed. */
快捷鍵說明
復制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -