fec.c
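/*
 * Excerpt from the Linux FEC (Fast Ethernet Controller) network driver:
 * the FEC interrupt handler, the transmit and receive completion paths,
 * and the MII/PHY management helpers.
 */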
            bdp++;
        }
    }
#endif

    if (!fep->tx_full) {
        netif_wake_queue(dev);
    }
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static void
fec_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
    struct net_device *dev = dev_id;
    volatile fec_t *fecp;
    uint int_events;
#ifdef CONFIG_FEC_PACKETHOOK
    struct fec_enet_private *fep = dev->priv;
    __u32 regval;

    if (fep->ph_regaddr) regval = *fep->ph_regaddr;
#endif
    fecp = (volatile fec_t*)dev->base_addr;

    /* Get the interrupt events that caused us to be here. */
    while ((int_events = fecp->fec_ievent) != 0) {
        fecp->fec_ievent = int_events;
        if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
                           FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) {
            printk("FEC ERROR %x\n", int_events);
        }

        /* Handle receive event in its own function. */
        if (int_events & FEC_ENET_RXF) {
#ifdef CONFIG_FEC_PACKETHOOK
            fec_enet_rx(dev, regval);
#else
            fec_enet_rx(dev);
#endif
        }

        /* Transmit OK, or non-fatal error.  Update the buffer
           descriptors.  FEC handles all errors, we just discover
           them as part of the transmit process.
        */
        if (int_events & FEC_ENET_TXF) {
#ifdef CONFIG_FEC_PACKETHOOK
            fec_enet_tx(dev, regval);
#else
            fec_enet_tx(dev);
#endif
        }

        if (int_events & FEC_ENET_MII) {
#ifdef CONFIG_USE_MDIO
            fec_enet_mii(dev);
#else
            printk("%s[%d] %s: unexpected FEC_ENET_MII event\n",
                   __FILE__, __LINE__, __FUNCTION__);
#endif /* CONFIG_USE_MDIO */
        }
    }
}
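/* Transmit completion: reclaim buffer descriptors the FEC has finished
 * with, record per-error statistics, free the transmitted sk_buffs, and
 * restart the queue if the ring had been full.
 */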
static void
#ifdef CONFIG_FEC_PACKETHOOK
fec_enet_tx(struct net_device *dev, __u32 regval)
#else
fec_enet_tx(struct net_device *dev)
#endif
{
    struct fec_enet_private *fep;
    volatile cbd_t *bdp;
    struct sk_buff *skb;

    fep = dev->priv;
    spin_lock(&fep->lock);
    bdp = fep->dirty_tx;

    while ((bdp->cbd_sc & BD_ENET_TX_READY) == 0) {
        if (bdp == fep->cur_tx && fep->tx_full == 0)
            break;

        skb = fep->tx_skbuff[fep->skb_dirty];

        /* Check for errors. */
        if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                           BD_ENET_TX_RL | BD_ENET_TX_UN |
                           BD_ENET_TX_CSL)) {
            fep->stats.tx_errors++;
            if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
                fep->stats.tx_heartbeat_errors++;
            if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
                fep->stats.tx_window_errors++;
            if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
                fep->stats.tx_aborted_errors++;
            if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
                fep->stats.tx_fifo_errors++;
            if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
                fep->stats.tx_carrier_errors++;
        } else {
#ifdef CONFIG_FEC_PACKETHOOK
            /* Packet hook ... */
            if (fep->ph_txhandler &&
                ((struct ethhdr *)skb->data)->h_proto == fep->ph_proto) {
                fep->ph_txhandler((__u8*)skb->data, skb->len,
                                  regval, fep->ph_priv);
            }
#endif
            fep->stats.tx_packets++;
        }

#ifndef final_version
        if (bdp->cbd_sc & BD_ENET_TX_READY)
            printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
        /* Deferred means some collisions occurred during transmit,
         * but we eventually sent the packet OK.
         */
        if (bdp->cbd_sc & BD_ENET_TX_DEF)
            fep->stats.collisions++;

        /* Free the sk buffer associated with this last transmit. */
#if 0
        printk("TXI: %x %x %x\n", bdp, skb, fep->skb_dirty);
#endif
        dev_kfree_skb_irq(skb/*, FREE_WRITE*/);
        fep->tx_skbuff[fep->skb_dirty] = NULL;
        fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

        /* Update pointer to next buffer descriptor to be transmitted. */
        if (bdp->cbd_sc & BD_ENET_TX_WRAP)
            bdp = fep->tx_bd_base;
        else
            bdp++;

        /* Since we have freed up a buffer, the ring is no longer
         * full.
         */
        if (fep->tx_full) {
            fep->tx_full = 0;
            if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
        }
#ifdef CONFIG_FEC_PACKETHOOK
        /* Re-read register.  Not exactly guaranteed to be correct, but... */
        if (fep->ph_regaddr) regval = *fep->ph_regaddr;
#endif
    }
    fep->dirty_tx = (cbd_t *)bdp;
    spin_unlock(&fep->lock);
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
#ifdef CONFIG_FEC_PACKETHOOK
fec_enet_rx(struct net_device *dev, __u32 regval)
#else
fec_enet_rx(struct net_device *dev)
#endif
{
    struct fec_enet_private *fep;
    volatile fec_t *fecp;
    volatile cbd_t *bdp;
    struct sk_buff *skb;
    ushort pkt_len;
    __u8 *data;

    fep = dev->priv;
    fecp = (volatile fec_t*)dev->base_addr;

    /* First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
#ifndef final_version
        /* Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
            printk("FEC ENET: rcv is not +last\n");
#endif
        /* Check for errors. */
        if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH |
                           BD_ENET_RX_NO | BD_ENET_RX_CR |
                           BD_ENET_RX_OV)) {
            fep->stats.rx_errors++;
            if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                /* Frame too long or too short. */
                fep->stats.rx_length_errors++;
            }
            if (bdp->cbd_sc & BD_ENET_RX_NO)  /* Frame alignment */
                fep->stats.rx_frame_errors++;
            if (bdp->cbd_sc & BD_ENET_RX_CR)  /* CRC Error */
                fep->stats.rx_crc_errors++;
            if (bdp->cbd_sc & BD_ENET_RX_OV)  /* FIFO overrun */
                fep->stats.rx_crc_errors++;
        }

        /* Report late collisions as a frame error.
         * On this error, the BD is closed, but we don't know what we
         * have in the buffer.  So, just drop this frame on the floor.
         */
        if (bdp->cbd_sc & BD_ENET_RX_CL) {
            fep->stats.rx_errors++;
            fep->stats.rx_frame_errors++;
            goto rx_processing_done;
        }

        /* Process the incoming frame. */
        fep->stats.rx_packets++;
        pkt_len = bdp->cbd_datlen;
        fep->stats.rx_bytes += pkt_len;
        data = (__u8*)__va(bdp->cbd_bufaddr);
#ifdef CONFIG_FEC_PACKETHOOK
        /* Packet hook ... */
        if (fep->ph_rxhandler) {
            if (((struct ethhdr *)data)->h_proto == fep->ph_proto) {
                switch (fep->ph_rxhandler(data, pkt_len,
                                          regval, fep->ph_priv)) {
                case 1:
                    goto rx_processing_done;
                    break;
                case 0:
                    break;
                default:
                    fep->stats.rx_errors++;
                    goto rx_processing_done;
                }
            }
        }

        /* If it wasn't filtered - copy it to an sk buffer. */
#endif

        /* This does 16 byte alignment, exactly what we need.
         * The packet length includes FCS, but we don't want to
         * include that when passing upstream as it messes up
         * bridging applications.
         */
        skb = dev_alloc_skb(pkt_len-4);

        if (skb == NULL) {
            printk("%s: Memory squeeze, dropping packet.\n", dev->name);
            fep->stats.rx_dropped++;
        } else {
            skb->dev = dev;
            skb_put(skb, pkt_len-4);  /* Make room */
            eth_copy_and_sum(skb,
                             (unsigned char *)__va(bdp->cbd_bufaddr),
                             pkt_len-4, 0);
            skb->protocol = eth_type_trans(skb, dev);
#ifdef DEBUG_MULTICAST
            if (bdp->cbd_sc & BD_ENET_RX_MC) {
                printk("%s: Received Multicast packet DA: "
                       "%2x:%2x:%2x:%2x:%2x:%2x\n",
                       dev->name,
                       (int)data[0], (int)data[1], (int)data[2],
                       (int)data[3], (int)data[4], (int)data[5]);
            }
#endif
            netif_rx(skb);
        }

rx_processing_done:

        /* Clear the status flags for this buffer. */
        bdp->cbd_sc &= ~BD_ENET_RX_STATS;

        /* Mark the buffer empty. */
        bdp->cbd_sc |= BD_ENET_RX_EMPTY;

        /* Update BD pointer to next entry. */
        if (bdp->cbd_sc & BD_ENET_RX_WRAP)
            bdp = fep->rx_bd_base;
        else
            bdp++;

#if 1
        /* Doing this here will keep the FEC running while we process
         * incoming frames.  On a heavily loaded network, we should be
         * able to keep up at the expense of system resources.
         */
        fecp->fec_r_des_active = 0x01000000;
#endif
#ifdef CONFIG_FEC_PACKETHOOK
        /* Re-read register.  Not exactly guaranteed to be correct, but... */
        if (fep->ph_regaddr) regval = *fep->ph_regaddr;
#endif
    } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
    fep->cur_rx = (cbd_t *)bdp;

#if 0
    /* Doing this here will allow us to process all frames in the
     * ring before the FEC is allowed to put more there.  On a heavily
     * loaded network, some frames may be lost.  Unfortunately, this
     * increases the interrupt overhead since we can potentially work
     * our way back to the interrupt return only to come right back
     * here.
     */
    fecp->fec_r_des_active = 0x01000000;
#endif
}
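/* MII management.  fec_enet_mii() runs when the FEC signals that an MII
 * transfer has completed: it hands the read value to the callback queued
 * for that transaction, recycles the request onto the free list, and
 * starts the next queued transaction, if any.  mii_queue() builds and
 * enqueues those requests, kicking off the transfer immediately when the
 * queue is empty.
 */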
#ifdef CONFIG_USE_MDIO
static void
fec_enet_mii(struct net_device *dev)
{
    struct fec_enet_private *fep;
    volatile fec_t *ep;
    mii_list_t *mip;
    uint mii_reg;

    fep = (struct fec_enet_private *)dev->priv;
    ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
    mii_reg = ep->fec_mii_data;

    if ((mip = mii_head) == NULL) {
        printk("MII and no head!\n");
        return;
    }

    if (mip->mii_func != NULL)
        (*(mip->mii_func))(mii_reg, dev, mip->mii_data);

    mii_head = mip->mii_next;
    mip->mii_next = mii_free;
    mii_free = mip;

    if ((mip = mii_head) != NULL) {
        ep->fec_mii_data = mip->mii_regval;
    }
}

static int
mii_queue(struct net_device *dev, int regval,
          void (*func)(uint, struct net_device *, uint), uint data)
{
    struct fec_enet_private *fep;
    unsigned long flags;
    mii_list_t *mip;
    int retval;

    /* Add PHY address to register command. */
    fep = dev->priv;
    regval |= fep->phy_addr << 23;

    retval = 0;

    save_flags(flags);
    cli();

    if ((mip = mii_free) != NULL) {
        mii_free = mip->mii_next;
        mip->mii_regval = regval;
        mip->mii_func = func;
        mip->mii_next = NULL;
        mip->mii_data = data;
        if (mii_head) {
            mii_tail->mii_next = mip;
            mii_tail = mip;
        } else {
            mii_head = mii_tail = mip;
            (&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec))->fec_mii_data = regval;
        }
    } else {
        retval = 1;
    }

    restore_flags(flags);

    return(retval);
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
    int k;

    if (!c)
        return;

    for (k = 0; (c+k)->mii_data != mk_mii_end; k++)
        mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0);
}
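/* MII callbacks that decode raw PHY register values into the driver's
 * phy_status bits: mii_parse_sr handles the status register (link, fault,
 * autonegotiation complete), mii_parse_cr the control register, and
 * mii_parse_anar the autonegotiation advertisement register.
 */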
static void mii_parse_sr(uint mii_reg, struct net_device *dev, uint data)
{
    volatile struct fec_enet_private *fep = dev->priv;
    uint s = fep->phy_status;

    s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

    if (mii_reg & 0x0004)
        s |= PHY_STAT_LINK;
    if (mii_reg & 0x0010)
        s |= PHY_STAT_FAULT;
    if (mii_reg & 0x0020)
        s |= PHY_STAT_ANC;

    fep->phy_status = s;
    fep->link = (s & PHY_STAT_LINK) ? 1 : 0;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev, uint data)
{
    volatile struct fec_enet_private *fep = dev->priv;
    uint s = fep->phy_status;

    s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);

    if (mii_reg & 0x1000)
        s |= PHY_CONF_ANE;
    if (mii_reg & 0x4000)
        s |= PHY_CONF_LOOP;

    fep->phy_status = s;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev, uint data)
{
    volatile struct fec_enet_private *fep = dev->priv;
    uint s = fep->phy_status;

    s &= ~(PHY_CONF_SPMASK);

    if (mii_reg & 0x0020)
        s |= PHY_CONF_10HDX;
    if (mii_reg & 0x0040)
        s |= PHY_CONF_10FDX;
    if (mii_reg & 0x0080)
        s |= PHY_CONF_100HDX;
    if (mii_reg & 0x0100)
        s |= PHY_CONF_100FDX;

    fep->phy_status = s;
}

#if 0
static void mii_disp_reg(uint mii_reg, struct net_device *dev, uint data)
{
    printk("reg %u = 0x%04x\n", (mii_reg >> 18) & 0x1f,
           mii_reg & 0xffff);
}
#endif

/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards */

#ifdef CONFIG_FEC_LXT970

#define MII_LXT970_MIRROR    16  /* Mirror register           */
#define MII_LXT970_IER       17  /* Interrupt Enable Register */
#define MII_LXT970_ISR       18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG    19  /* Configuration Register    */
#define MII_LXT970_CSR       20  /* Chip Status Register      */

static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev, uint data)
{
    volatile struct fec_enet_private *fep = dev->priv;
    uint s = fep->phy_status;

    s &= ~(PHY_STAT_SPMASK);

    if (mii_reg & 0x0800) {
        if (mii_reg & 0x1000)
            s |= PHY_STAT_100FDX;
        else
            s |= PHY_STAT_100HDX;
    } else {
        if (mii_reg & 0x1000)
            s |= PHY_STAT_10FDX;
        else