ccio-dma.c
ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
                  enum dma_data_direction direction)
{
        struct ioc *ioc;
        unsigned long flags;
        dma_addr_t offset = iova & ~IOVP_MASK;

        BUG_ON(!dev);
        ioc = GET_IOC(dev);

        DBG_RUN("%s() iovp 0x%lx/%x\n",
                __FUNCTION__, (long)iova, size);

        iova ^= offset; /* clear offset bits */
        size += offset;
        size = ALIGN(size, IOVP_SIZE);

        spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CCIO_MAP_STATS
        ioc->usingle_calls++;
        ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

        ccio_mark_invalid(ioc, iova, size);
        ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
        spin_unlock_irqrestore(&ioc->res_lock, flags);
}

/**
 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 *
 * This function implements the pci_alloc_consistent function.
 */
static void *
ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                      gfp_t flag)
{
        void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
        if(!hwdev) {
                /* only support PCI */
                *dma_handle = 0;
                return 0;
        }
#endif
        ret = (void *) __get_free_pages(flag, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
        }

        return ret;
}

/**
 * ccio_free_consistent - Free a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
 * @dma_handle: The device address returned from the ccio_alloc_consistent.
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
                     dma_addr_t dma_handle)
{
        ccio_unmap_single(dev, dma_handle, size, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_MAP_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_sg function.
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
            enum dma_data_direction direction)
{
        struct ioc *ioc;
        int coalesced, filled = 0;
        unsigned long flags;
        unsigned long hint = hint_lookup[(int)direction];
        unsigned long prev_len = 0, current_len = 0;
        int i;

        BUG_ON(!dev);
        ioc = GET_IOC(dev);

        DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sg_dma_address(sglist) = ccio_map_single(dev,
                                (void *)sg_virt_addr(sglist), sglist->length,
                                direction);
                sg_dma_len(sglist) = sglist->length;
                return 1;
        }

        for(i = 0; i < nents; i++)
                prev_len += sglist[i].length;

        spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_MAP_STATS
        ioc->msg_calls++;
#endif

        /*
        ** First coalesce the chunks and allocate I/O pdir space
        **
        ** If this is one DMA stream, we can properly map using the
        ** correct virtual address associated with each DMA page.
        ** w/o this association, we wouldn't have coherent DMA!
        ** Access to the virtual address is what forces a two pass algorithm.
        */
        coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);

        /*
        ** Program the I/O Pdir
        **
        ** map the virtual addresses to the I/O Pdir
        ** o dma_address will contain the pdir index
        ** o dma_len will contain the number of bytes to map
        ** o page/offset contain the virtual address.
        */
        filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        BUG_ON(coalesced != filled);

        DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

        for (i = 0; i < filled; i++)
                current_len += sg_dma_len(sglist + i);

        BUG_ON(current_len != prev_len);

        return filled;
}

/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_sg function.
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
              enum dma_data_direction direction)
{
        struct ioc *ioc;

        BUG_ON(!dev);
        ioc = GET_IOC(dev);

        DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
                   __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

#ifdef CCIO_MAP_STATS
        ioc->usg_calls++;
#endif

        while(sg_dma_len(sglist) && nents--) {

#ifdef CCIO_MAP_STATS
                ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
                ccio_unmap_single(dev, sg_dma_address(sglist),
                                  sg_dma_len(sglist), direction);
                ++sglist;
        }

        DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
}

static struct hppa_dma_ops ccio_ops = {
        .dma_supported =                ccio_dma_supported,
        .alloc_consistent =             ccio_alloc_consistent,
        .alloc_noncoherent =            ccio_alloc_consistent,
        .free_consistent =              ccio_free_consistent,
        .map_single =                   ccio_map_single,
        .unmap_single =                 ccio_unmap_single,
        .map_sg =                       ccio_map_sg,
        .unmap_sg =                     ccio_unmap_sg,
        .dma_sync_single_for_cpu =      NULL,   /* NOP for U2/Uturn */
        .dma_sync_single_for_device =   NULL,   /* NOP for U2/Uturn */
        .dma_sync_sg_for_cpu =          NULL,   /* ditto */
        .dma_sync_sg_for_device =       NULL,   /* ditto */
};

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
        int len = 0;
        struct ioc *ioc = ioc_list;

        while (ioc != NULL) {
                unsigned int total_pages = ioc->res_size << 3;
                unsigned long avg = 0, min, max;
                int j;

                len += seq_printf(m, "%s\n", ioc->name);

                len += seq_printf(m, "Cujo 2.0 bug : %s\n",
                                  (ioc->cujo20_bug ? "yes" : "no"));

                len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
                                  total_pages * 8, total_pages);

#ifdef CCIO_MAP_STATS
                len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
                                  total_pages - ioc->used_pages, ioc->used_pages,
                                  (int)(ioc->used_pages * 100 / total_pages));
#endif

                len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
                                  ioc->res_size, total_pages);

#ifdef CCIO_SEARCH_TIME
                min = max = ioc->avg_search[0];
                for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
                        avg += ioc->avg_search[j];
                        if(ioc->avg_search[j] > max)
                                max = ioc->avg_search[j];
                        if(ioc->avg_search[j] < min)
                                min = ioc->avg_search[j];
                }
                avg /= CCIO_SEARCH_SAMPLE;
                len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
                                  min, avg, max);
#endif
#ifdef CCIO_MAP_STATS
                len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
                                  ioc->msingle_calls, ioc->msingle_pages,
                                  (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

                /* KLUGE - unmap_sg calls unmap_single for each mapped page */
                min = ioc->usingle_calls - ioc->usg_calls;
                max = ioc->usingle_pages - ioc->usg_pages;
                len += seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
                                  min, max, (int)((max * 1000)/min));

                len += seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
                                  ioc->msg_calls, ioc->msg_pages,
                                  (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

                len += seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
                                  ioc->usg_calls, ioc->usg_pages,
                                  (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif  /* CCIO_MAP_STATS */

                ioc = ioc->next;
        }

        return 0;
}

static int ccio_proc_info_open(struct inode *inode, struct file *file)
{
        return single_open(file, &ccio_proc_info, NULL);
}

static const struct file_operations ccio_proc_info_fops = {
        .owner = THIS_MODULE,
        .open = ccio_proc_info_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
        int len = 0;
        struct ioc *ioc = ioc_list;

        while (ioc != NULL) {
                u32 *res_ptr = (u32 *)ioc->res_map;
                int j;

                for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
                        if ((j & 7) == 0)
                                len += seq_puts(m, "\n ");
                        len += seq_printf(m, "%08x", *res_ptr);
                        res_ptr++;
                }
                len += seq_puts(m, "\n\n");
                ioc = ioc->next;
                break; /* XXX - remove me */
        }

        return 0;
}

static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
{
        return single_open(file, &ccio_proc_bitmap_info, NULL);
}

static const struct file_operations ccio_proc_bitmap_fops = {
        .owner = THIS_MODULE,
        .open = ccio_proc_bitmap_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
#endif

/**
 * ccio_find_ioc - Find the ioc in the ioc_list
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provide hardware path.
 */
static struct ioc * ccio_find_ioc(int hw_path)
{
        int i;
        struct ioc *ioc;

        ioc = ioc_list;
        for (i = 0; i < ioc_count; i++) {
                if (ioc->hw_path == hw_path)
                        return ioc;

                ioc = ioc->next;
        }

        return NULL;
}

/**
 * ccio_get_iommu - Find the iommu which controls this device
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMU's and returns
 * the appropriate IOMMU for the device based on its hardware path.
 */
void * ccio_get_iommu(const struct parisc_device *dev)
{
        dev = find_pa_parent_type(dev, HPHW_IOA);
        if (!dev)
                return NULL;

        return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP    0x10000000      /* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages.  To avoid this happening, we mark these pages
 * as `used', and ensure that nothing will try to allocate from them.
 */
void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
        unsigned int idx;
        struct parisc_device *dev = parisc_parent(cujo);
        struct ioc *ioc = ccio_get_iommu(dev);
        u8 *res_ptr;

        ioc->cujo20_bug = 1;
        res_ptr = ioc->res_map;
        idx = PDIR_INDEX(iovp) >> 3;

        while (idx < ioc->res_size) {
                res_ptr[idx] |= 0xff;
                idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
        }
}

#if 0
/* GRANT - is this needed for U2 or not? */
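Drivers never call these entry points directly; the kerneldoc comments name the pci_* wrappers they implement, and on a U2/Uturn system those wrappers end up dispatching through the ccio_ops table above. Below is a minimal consumer sketch, not part of the driver: the device pointer pdev, the buffers, and the sizes are hypothetical, and error handling is trimmed to keep the call sequence visible.

#include <linux/pci.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: a hypothetical PCI driver exercising the paths that
 * ccio_ops services on PA-RISC.  "pdev", "buf", "sg" and the sizes are
 * made-up example values.
 */
static int example_dma_usage(struct pci_dev *pdev, void *buf, size_t len,
                             struct scatterlist *sg, int nents)
{
        dma_addr_t ring_dma, buf_dma;
        void *ring;
        int mapped;

        /* Consistent (coherent) allocation -> ccio_alloc_consistent() */
        ring = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_dma);
        if (!ring)
                return -ENOMEM;

        /* Streaming mapping of one buffer -> ccio_map_single() */
        buf_dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

        /* Streaming mapping of a scatterlist -> ccio_map_sg() */
        mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);

        /*
         * ... program the device with ring_dma, buf_dma and the
         * sg_dma_address()/sg_dma_len() of the first "mapped" entries,
         * then wait for the I/O to complete ...
         */

        /* Tear-down mirrors the ops table: unmap_sg, unmap_single, free_consistent */
        if (mapped)
                pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
        pci_unmap_single(pdev, buf_dma, len, PCI_DMA_TODEVICE);
        pci_free_consistent(pdev, PAGE_SIZE, ring, ring_dma);
        return 0;
}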
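The stride in ccio_cujo20_fixup() is easier to see with concrete numbers. The sketch below works through the index arithmetic in plain user-space C, assuming PDIR_INDEX(iovp) expands to (iovp >> IOVP_SHIFT) with IOVP_SHIFT of 12 (4 KB I/O pages); those macros are defined outside this excerpt, so treat the values here as illustrative assumptions. The bitmap size used is likewise hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Assumed: IOVP_SHIFT == 12 and PDIR_INDEX(iovp) == (iovp >> IOVP_SHIFT).
 * CUJO_20_STEP is taken verbatim from the driver. */
#define IOVP_SHIFT              12
#define PDIR_INDEX(iovp)        ((iovp) >> IOVP_SHIFT)
#define CUJO_20_STEP            0x10000000

int main(void)
{
        uint32_t iovp = 0x0;                     /* hypothetical starting iovp */
        unsigned int idx = PDIR_INDEX(iovp) >> 3;        /* 8 pdir entries per bitmap byte */
        unsigned int step = PDIR_INDEX(CUJO_20_STEP) >> 3;
        unsigned int res_size = 0x10000;         /* hypothetical bitmap size in bytes */
        unsigned int marked = 0;

        /* Same loop shape as ccio_cujo20_fixup(): every CUJO_20_STEP of I/O
         * virtual address space, one whole bitmap byte (8 pages) is poisoned. */
        while (idx < res_size) {
                marked++;
                idx += step;
        }

        /* step = (0x10000000 >> 12) >> 3 = 0x2000 bytes between poisoned bytes */
        printf("stride between poisoned bytes: 0x%x, bytes marked: %u\n",
               step, marked);
        return 0;
}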