/*
 * s2io.c: Rx ring refill, Rx buffer teardown and Rx/Tx interrupt
 * handling for the S2IO Xframe 10GbE network driver.
 *
* The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is supported.
 */
/*
 * fill_rx_buffers - replenish ring @ring_no with freshly allocated skbs.
 * @nic:     device private data.
 * @ring_no: index of the Rx ring to refill.
 *
 * Allocates up to (pkt_cnt[ring_no] - rx_bufs_left[ring_no]) skbs, DMA-maps
 * each one into Buffer0 of a free RxD (single buffer mode) and hands the
 * descriptor to the NIC by setting RXD_OWN_XENA.
 *
 * Returns SUCCESS, or -ENOMEM if an skb allocation fails.
 */
int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	int offset, offset1;
	u32 alloc_tab = 0;
	/* Number of descriptors that still need a buffer: the ring's target
	 * count minus those already owned by the NIC. */
	u32 alloc_cnt = nic->pkt_cnt[ring_no] -
	    atomic_read(&nic->rx_bufs_left[ring_no]);
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* frame_len[] is a file-scope table not visible in this chunk;
	 * presumably a per-ring maximum frame length override - TODO confirm.
	 * NOTE(review): growing dev->mtu as a side effect of a refill path
	 * is surprising; verify this is intentional. */
	if (frame_len[ring_no]) {
		if (frame_len[ring_no] > dev->mtu)
			dev->mtu = frame_len[ring_no];
		size = frame_len[ring_no] + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	} else {
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	}

	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rx_curr_put_info[ring_no].
		    block_index;
		block_no1 = mac_control->rx_curr_get_info[ring_no].
		    block_index;
		off = mac_control->rx_curr_put_info[ring_no].offset;
		off1 = mac_control->rx_curr_get_info[ring_no].offset;
		/* Linearize (block, offset) so put and get positions can be
		 * compared directly. */
		offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
		rxdp = nic->rx_blocks[ring_no][block_no].
		    block_virt_addr + off;

		/* Put caught up with get while the descriptor still holds an
		 * skb: the ring is full, stop refilling. */
		if ((offset == offset1) && (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (rxdp->Control_1 == END_OF_BLOCK) {
			/* End-of-block marker: advance to the next block and
			 * follow the link stored in Control_2. */
			mac_control->rx_curr_put_info[ring_no].
			    block_index++;
			mac_control->rx_curr_put_info[ring_no].
			    block_index %= nic->block_count[ring_no];
			block_no = mac_control->rx_curr_put_info
			    [ring_no].block_index;
			off++;
			off %= (MAX_RXDS_PER_BLOCK + 1);
			mac_control->rx_curr_put_info[ring_no].offset =
			    off;
			/* Control_2 of the marker RxD holds the virtual
			 * address of the next block's first descriptor. */
			rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		if (rxdp->Control_1 & RXD_OWN_XENA) {
			/* Descriptor still owned by the NIC; remember where
			 * we stopped and bail out. */
			mac_control->rx_curr_put_info[ring_no].
			    offset = off;
			goto end;
		}
		skb = dev_alloc_skb(size + NET_IP_ALIGN);
		if (!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			return -ENOMEM;
		}
		/* Align the IP header on a word boundary. */
		skb_reserve(skb, NET_IP_ALIGN);
		memset(rxdp, 0, sizeof(RxD_t));
		rxdp->Buffer0_ptr = pci_map_single
		    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
		rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
		rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
		/* Stash the skb pointer so the Rx handler can recover it. */
		rxdp->Host_Control = (unsigned long) (skb);
		/* Ownership transfer to the NIC must be the last write. */
		rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		off %= (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rx_curr_put_info[ring_no].offset = off;
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}
      end:
	return SUCCESS;
}

/*
 * Input Arguments:
 *  sp - device private variable.
 * Return Value:
 *  NONE.
 * Description:
 *  This function will free all Rx buffers allocated by host.
*/
static void freeRxBuffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int ring, rxd, block = 0, slot, freed = 0;
	RxD_t *rx_desc;
	struct sk_buff *pending_skb;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Walk every descriptor of every ring, unmapping and freeing any skb
	 * still attached, then reset the ring bookkeeping to a clean state. */
	for (ring = 0; ring < config->RxRingNum; ring++) {
		int buf_size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;

		for (rxd = 0, block = 0; rxd < config->RxCfg[ring].NumRxd;
		     rxd++) {
			slot = rxd % (MAX_RXDS_PER_BLOCK + 1);
			rx_desc = sp->rx_blocks[ring][block].block_virt_addr +
			    slot;
			if (rx_desc->Control_1 == END_OF_BLOCK) {
				/* Marker descriptor: hop to the next block via
				 * the link held in Control_2 and account for
				 * the marker slot. */
				rx_desc = (RxD_t *) ((unsigned long)
						     rx_desc->Control_2);
				rxd++;
				block++;
			}
			pending_skb = (struct sk_buff *) ((unsigned long)
							  rx_desc->
							  Host_Control);
			if (pending_skb != NULL) {
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)
						 rx_desc->Buffer0_ptr,
						 buf_size,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(pending_skb);
				atomic_dec(&sp->rx_bufs_left[ring]);
				freed++;
			}
			memset(rx_desc, 0, sizeof(RxD_t));
		}
		mac_control->rx_curr_put_info[ring].block_index = 0;
		mac_control->rx_curr_get_info[ring].block_index = 0;
		mac_control->rx_curr_put_info[ring].offset = 0;
		mac_control->rx_curr_get_info[ring].offset = 0;
		atomic_set(&sp->rx_bufs_left[ring], 0);
		/* 'freed' is cumulative across rings, matching the original
		 * reporting behaviour. */
		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, freed, ring);
	}
}

/*
 * Input Argument:
 *  dev - pointer to the device structure.
 *  budget - The number of packets that were budgeted to be processed during
 *  one pass through the 'Poll' function.
 * Return value:
 *  0 on success and 1 if there are No Rx packets to be processed.
 * Description:
 *  Comes into picture only if NAPI support has been incorporated. It does
 *  the same thing that rxIntrHandler does, but not in an interrupt context,
 *  and it will process only a given number of packets.
*/
#ifdef CONFIG_S2IO_NAPI
static int s2io_poll(struct net_device *dev, int *budget)
{
	nic_t *nic = dev->priv;
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	int pkts_to_process = *budget, pkt_cnt = 0;
	register u64 val64 = 0;
	rx_curr_get_info_t offset_info;
	int i, block_no;
	u16 val16, cksum;
	struct sk_buff *skb;
	RxD_t *rxdp;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Never process more than the device's remaining quota. */
	if (pkts_to_process > dev->quota)
		pkts_to_process = dev->quota;

	/* rx_traffic_int is write-1-to-clear: read it and write the same
	 * value back to acknowledge the interrupt. */
	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	for (i = 0; i < config->RxRingNum; i++) {
		/* NOTE(review): the budget is decremented once per RING here,
		 * not once per packet - the inner while loop below never
		 * touches pkts_to_process, so the NAPI budget is not actually
		 * enforced per packet. Verify against a known-good s2io.c. */
		if (--pkts_to_process < 0) {
			goto no_rx;
		}
		offset_info = mac_control->rx_curr_get_info[i];
		block_no = offset_info.block_index;
		rxdp = nic->rx_blocks[i][block_no].block_virt_addr +
		    offset_info.offset;
		/* Drain every descriptor the NIC has handed back to us. */
		while (!(rxdp->Control_1 & RXD_OWN_XENA)) {
			if (rxdp->Control_1 == END_OF_BLOCK) {
				/* Marker RxD: Control_2 links to the next
				 * block's first descriptor. */
				rxdp = (RxD_t *) ((unsigned long) rxdp->
						  Control_2);
				offset_info.offset++;
				offset_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
				block_no++;
				block_no %= nic->block_count[i];
				mac_control->rx_curr_get_info[i].
				    offset = offset_info.offset;
				mac_control->rx_curr_get_info[i].
				    block_index = block_no;
				continue;
			}
			skb = (struct sk_buff *) ((unsigned long) rxdp->
						  Host_Control);
			if (skb == NULL) {
				DBG_PRINT(ERR_DBG, "%s: The skb is ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
				return 0;
			}
			/* BUFFER0_SIZE occupies the top 16 bits of the
			 * extracted field, hence the >> 48. */
			val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
			val16 = (u16) (val64 >> 48);
			/* NOTE(review): cksum is computed but never used in
			 * this function. */
			cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 rxdp->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			/* Hand the frame to the OS-specific Rx handler. */
			rxOsmHandler(nic, val16, rxdp, i);
			pkt_cnt++;
			offset_info.offset++;
			offset_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
			rxdp = nic->rx_blocks[i][block_no].block_virt_addr +
			    offset_info.offset;
			mac_control->rx_curr_get_info[i].offset =
			    offset_info.offset;
		}
	}
	/* Report at least one packet so quota/budget always make progress. */
	if (!pkt_cnt)
		pkt_cnt = 1;
	/* Replenish all rings with fresh buffers. */
	for (i = 0; i < config->RxRingNum; i++)
		fill_rx_buffers(nic, i);
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);
	/* Re enable the Rx interrupts. */
	en_dis_able_NicIntrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
	return 0;

      no_rx:
	/* Budget exhausted: refill rings, report work done, and stay on the
	 * poll list (interrupts remain disabled). */
	for (i = 0; i < config->RxRingNum; i++)
		fill_rx_buffers(nic, i);
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	return 1;
}
#else
/*
 * Input Arguments:
 *  device private variable.
 * Return Value:
 *  NONE.
 * Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
*/
static void rxIntrHandler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	rx_curr_get_info_t offset_info;
	RxD_t *rxdp;
	struct sk_buff *skb;
	u16 val16, cksum;
	register u64 val64 = 0;
	int i, block_no;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;
#if DEBUG_ON
	nic->rxint_cnt++;
#endif
	/* rx_traffic_int reg is an R1 register, hence we read and write back
	 * the same value in the register to clear it. */
	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	for (i = 0; i < config->RxRingNum; i++) {
		offset_info = mac_control->rx_curr_get_info[i];
		block_no = offset_info.block_index;
		rxdp = nic->rx_blocks[i][block_no].block_virt_addr +
		    offset_info.offset;
		/* Process every descriptor the NIC has returned to the host. */
		while (!(rxdp->Control_1 & RXD_OWN_XENA)) {
			if (rxdp->Control_1 == END_OF_BLOCK) {
				/* Marker RxD: follow the Control_2 link to the
				 * next block and wrap the bookkeeping. */
				rxdp = (RxD_t *) ((unsigned long)
						  rxdp->Control_2);
				offset_info.offset++;
				offset_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
				block_no++;
				block_no %= nic->block_count[i];
				mac_control->rx_curr_get_info[i].
				    offset = offset_info.offset;
				mac_control->rx_curr_get_info[i].
				    block_index = block_no;
				continue;
			}
			skb = (struct sk_buff *) ((unsigned long)
						  rxdp->Host_Control);
			if (skb == NULL) {
				/* NOTE(review): returning here leaves the
				 * remaining rings unprocessed even though the
				 * interrupt was already acknowledged above. */
				DBG_PRINT(ERR_DBG, "%s: The skb is ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
				return;
			}
			/* Buffer0 size sits in the top 16 bits of the field. */
			val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
			val16 = (u16) (val64 >> 48);
			/* NOTE(review): cksum is computed but never used. */
			cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
			/* Unmap size is recomputed from dev->mtu; presumably
			 * matches the size used in fill_rx_buffers() - TODO
			 * confirm when frame_len[] overrides are in play. */
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 rxdp->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			rxOsmHandler(nic, val16, rxdp, i);
			offset_info.offset++;
			offset_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
			rxdp = nic->rx_blocks[i][block_no].block_virt_addr +
			    offset_info.offset;
			mac_control->rx_curr_get_info[i].offset =
			    offset_info.offset;
		}
	}
}
#endif
/*
 * Input Arguments:
 *  device private variable
 * Return Value:
 *  NONE
 * Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD whose
 *  buffer was freed and frees all skbs whose data have already DMA'ed into
 *  the NICs internal memory.
 */
static void txIntrHandler(struct s2io_nic *nic)
{
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t offset_info, offset_info1;
	struct sk_buff *skb;
	TxD_t *txdlp;
	register u64 val64 = 0;
	int i;
	u16 j, frg_cnt;
	mac_info_t *mac_control;
	struct config_param *config;
#if DEBUG_ON
	int cnt = 0;
	nic->txint_cnt++;
#endif
	mac_control = &nic->mac_control;
	config = &nic->config;
	/* tx_traffic_int reg is an R1 register, hence we read and write
	 * back the same value in the register to clear it. */
	val64 = readq(&bar0->tx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	for (i = 0; i < config->TxFIFONum; i++) {
		offset_info = mac_control->tx_curr_get_info[i];
		offset_info1 = mac_control->tx_curr_put_info[i];
		txdlp = mac_control->txdl_start[i] +
		    (config->MaxTxDs * offset_info.offset);
		/* Reap completed TxD lists: not owned by the NIC, not yet
		 * caught up with the put pointer, and still holding an skb. */
		while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
		       (offset_info.offset != offset_info1.offset) &&
		       (txdlp->Host_Control)) {
			/* Check for TxD errors */
			if (txdlp->Control_1 & TXD_T_CODE) {
				unsigned long long err;
				err = txdlp->Control_1 & TXD_T_CODE;
				DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
					  err);
			}
			skb = (struct sk_buff *) ((unsigned long)
						  txdlp->Host_Control);
			if (skb == NULL) {
				DBG_PRINT(ERR_DBG, "%s: Null skb ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
				return;
			}
			nic->tx_pkt_count++;
			frg_cnt = skb_shinfo(skb)->nr_frags;
			/* For unfragmented skb: unmap the linear data area. */
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 txdlp->Buffer_Pointer,
					 skb->len - skb->data_len,
					 PCI_DMA_TODEVICE);
			if (frg_cnt) {
				/* Unmap each page fragment; TxD 0 was the
				 * linear part, fragments start at TxD 1. */
				TxD_t *temp = txdlp;
				txdlp++;
				for (j = 0; j < frg_cnt; j++, txdlp++) {
					skb_frag_t *frag =
					    &skb_shinfo(skb)->frags[j];
					pci_unmap_page(nic->pdev,
						       (dma_addr_t) txdlp->
						       Buffer_Pointer,
						       frag->size,
						       PCI_DMA_TODEVICE);
				}
				txdlp = temp;
			}
			memset(txdlp, 0, (sizeof(TxD_t) * config->MaxTxDs));
/* NOTE(extraction): the source is truncated at this point -- the remainder
 * of txIntrHandler() and everything after it is missing from this copy.
 * The text that followed here was code-viewer UI chrome (shortcut-key help),
 * not source code, and has been removed. */