/* ixgbe_main.c */
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2007 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497*******************************************************************************/#include <linux/types.h>#include <linux/module.h>#include <linux/pci.h>#include <linux/netdevice.h>#include <linux/vmalloc.h>#include <linux/string.h>#include <linux/in.h>#include <linux/ip.h>#include <linux/tcp.h>#include <linux/ipv6.h>#include <net/checksum.h>#include <net/ip6_checksum.h>#include <linux/ethtool.h>#include <linux/if_vlan.h>#include "ixgbe.h"#include "ixgbe_common.h"char ixgbe_driver_name[] = "ixgbe";static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver";#define DRV_VERSION "1.1.18"const char ixgbe_driver_version[] = DRV_VERSION;static const char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598AF] = &ixgbe_82598AF_info, [board_82598EB] = &ixgbe_82598EB_info, [board_82598AT] = &ixgbe_82598AT_info,};/* ixgbe_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */static struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598AF }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598AF }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT), board_82598AT }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598EB }, /* required last entry */ {0, }};MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");MODULE_LICENSE("GPL");MODULE_VERSION(DRV_VERSION);#define DEFAULT_DEBUG_LEVEL_SHIFT 3#ifdef DEBUG/** * ixgbe_get_hw_dev_name - return device name string * used by hardware layer to print debugging information **/char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw){ struct ixgbe_adapter 
*adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}
#endif

/**
 * ixgbe_set_ivar - program an Interrupt Vector Allocation Register entry
 * @adapter: board private structure
 * @int_alloc_entry: interrupt cause index (selects IVAR register and byte lane)
 * @msix_vector: MSI-X vector to route this cause to
 *
 * Each 32-bit IVAR register holds four 8-bit cause->vector mappings; this
 * read-modify-writes only the byte belonging to @int_alloc_entry.
 **/
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
			   u8 msix_vector)
{
	u32 ivar, index;

	/* mark the mapping valid, then locate the register and byte lane */
	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	/* clear and refill the 8-bit field for this entry only */
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

/**
 * ixgbe_unmap_and_free_tx_resource - release DMA mapping and skb for a
 * transmit buffer
 * @adapter: board private structure
 * @tx_buffer_info: buffer to clean; dma/skb fields are zeroed after release
 **/
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
					     struct ixgbe_tx_buffer *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
			       tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_check_tx_hang - detect a stalled transmit unit
 * @adapter: board private structure
 * @tx_ring: ring being checked
 * @eop: index of the next-to-watch descriptor
 * @eop_desc: the descriptor at @eop
 *
 * Declares a hang when the EOP buffer is still mapped more than HZ jiffies
 * after its timestamp and the transmit unit is not flow-control paused
 * (TFCS.TXOFF clear).  Returns true and dumps ring state if hung.
 **/
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop,
				       union ixgbe_adv_tx_desc *eop_desc)
{
	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of i */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].dma &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			" TDH <%x>\n"
			" TDT <%x>\n"
			" next_to_use <%x>\n"
			" next_to_clean <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			" time_stamp <%lx>\n"
			" next_to_watch <%x>\n"
			" jiffies <%lx>\n"
			" next_to_watch.status <%x>\n",
			readl(adapter->hw.hw_addr + tx_ring->head),
			readl(adapter->hw.hw_addr + tx_ring->tail),
			tx_ring->next_to_use,
			tx_ring->next_to_clean,
			tx_ring->tx_buffer_info[eop].time_stamp,
			eop,
			jiffies,
			eop_desc->wb.status);
		return true;
	}
	return false;
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 *
 * @adapter: board private structure
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop;
	bool cleaned = false;
	int count = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	/* reclaim each completed packet: walk descriptors up to and
	 * including the one hardware marked done (DD set) */
	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
		for (cleaned = false; !cleaned;) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			tx_ring->stats.bytes += tx_buffer_info->length;
			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);
			tx_desc->wb.status = 0;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		tx_ring->stats.packets++;
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		/* weight of a sort for tx, avoid endless transmit cleanup */
		if (count++ >= tx_ring->work_limit)
			break;
	}
	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	/* restart the queue once enough descriptors have been freed */
	spin_lock(&tx_ring->tx_lock);
	if (cleaned && netif_carrier_ok(netdev) &&
	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
	    !test_bit(__IXGBE_DOWN, &adapter->state))
		netif_wake_queue(netdev);
	spin_unlock(&tx_ring->tx_lock);

	if (adapter->detect_tx_hung)
		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
			netif_stop_queue(netdev);

	/* hit the work limit: re-arm ourselves via a software interrupt */
	if (count >= tx_ring->work_limit)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

	return cleaned;
}

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @is_vlan: packet has a VLAN tag
 * @tag: VLAN tag from descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
			      struct sk_buff *skb, bool is_vlan, u16 tag)
{
	/* NAPI path unless netpoll is active; netpoll must not schedule */
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
		else
netif_receive_skb(skb);
	} else {
		if (adapter->vlgrp && is_vlan)
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate hardware checksum result in the skb
 * @adapter: board private structure
 * @status_err: status/error bits from the rx descriptor write-back
 * @skb: packet being processed
 *
 * Leaves ip_summed as CHECKSUM_NONE unless hardware validated the L4
 * checksum; counts hardware checksum errors.
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
	    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_rx_error++;
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* NOTE(review): hw_csum_rx_good is incremented even when neither
	 * L4CS nor UDPCS is set, so this counter can overcount packets that
	 * were not actually checksummed - confirm against a later driver. */
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
		/* packet-split mode also needs a mapped page for payload */
		if (!rx_buffer_info->page &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			rx_buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!rx_buffer_info->page) {
				adapter->alloc_rx_page_failed++;
				goto no_buffers;
			}
			rx_buffer_info->page_dma =
				pci_map_page(pdev, rx_buffer_info->page,
					     0, PAGE_SIZE,
					     PCI_DMA_FROMDEVICE);
		}

		if (!rx_buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
skb_reserve(skb, NET_IP_ALIGN);
			rx_buffer_info->skb = skb;
			rx_buffer_info->dma = pci_map_single(pdev, skb->data,
							     bufsz,
							     PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(rx_buffer_info->page_dma);
			rx_desc->read.hdr_addr =
				cpu_to_le64(rx_buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
				cpu_to_le64(rx_buffer_info->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];
	}
no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		/* tail points at the last valid descriptor, one behind i */
		if (i-- == 0)
			i = (rx_ring->count - 1);
		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

/**
 * ixgbe_clean_rx_irq - process received packets up to a work budget
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: incremented per descriptor processed
 * @work_to_do: NAPI budget
 *
 * Handles both packet-split (header in skb, payload in page frags) and
 * single-buffer layouts.  Returns true if anything was cleaned.
 **/
static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 upper_len, len, staterr;
	u16 hdr_info, vlan_tag;
	bool is_vlan, cleaned = false;
	int cleaned_count = 0;

	i = rx_ring->next_to_clean;
	upper_len = 0;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];
	is_vlan = (staterr & IXGBE_RXD_STAT_VP);
	vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			/* packet split: header length is in hdr_info,
			 * payload length in upper.length */
			hdr_info =
				le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
			len = ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT);
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len >
IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else
			len = le16_to_cpu(rx_desc->wb.upper.length);

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		/* header (or whole packet) landed in the skb's linear area */
		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		/* packet-split payload: attach the page as a fragment */
		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page, 0, upper_len);
			rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];
		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			/* multi-descriptor packet: carry the skb forward to
			 * the next buffer and keep accumulating */
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);
		skb->protocol = eth_type_trans(skb, netdev);
		ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;
		/* return some buffers to hardware, one at a time is too slow */
/*
 * NOTE: this capture of ixgbe_main.c is truncated inside ixgbe_clean_rx_irq;
 * the remainder of the driver (rx cleanup tail, interrupt handlers, probe/
 * remove, module init/exit) is not present here.  Code-viewer UI text
 * (shortcut-key help) removed.
 */