ccio-dma.c
#define HINT_UDPATE_ENB 0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH	0x10UL	/* for outbound pages which are not SAFE */

/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc_consistent() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	HINT_STOP_MOST | IOPDIR_VALID,
	[PCI_DMA_NONE]		0,		/* not valid */
};

/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
 * @vba: The virtual address.
 * @hints: The DMA Hint.
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
 * WORD 0:
 * +------+----------------+-----------------------------------------------+
 * | Phys | Virtual Index  |               Phys                            |
 * | 0:3  |     0:11       |               4:19                            |
 * |4 bits|   12 bits      |              16 bits                          |
 * +------+----------------+-----------------------------------------------+
 * WORD 1:
 * +-----------------------+-----------------------------------------------+
 * |      Phys     |  Rsvd | Prefetch |Update |Rsvd  |Lock  |Safe  |Valid  |
 * |     20:39     |       | Enable   |Enable |      |Enable|DMA   |       |
 * |    20 bits    | 5 bits| 1 bit    |1 bit  |2 bits|1 bit |1 bit |1 bit  |
 * +-----------------------+-----------------------------------------------+
 *
 * The virtual index field is filled with the results of the LCI
 * (Load Coherence Index) instruction. The 8 bits used for the virtual
 * index are bits 12:19 of the value returned by LCI.
 */
void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, void *vba, unsigned long hints)
{
	register unsigned long pa = (volatile unsigned long) vba;
	register unsigned long ci;	/* coherent index */

	/* We currently only support kernel addresses */
	ASSERT(sid == KERNEL_SPACE);

	mtsp(sid, 1);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = virt_to_phys(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
	**        PCX-U/U+ do. (eg C200/C240)
	**        PCX-T'? Don't know. (eg C110 or similar K-class)
	**
	** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
	** Hopefully we can patch (NOP) these out at boot time somehow.
	**
	** "Since PCX-U employs an offset hash that is incompatible with
	**  the real mode coherence index generation of U2, the PDIR entry
	**  must be flushed to memory to retain coherence."
	*/
	asm volatile("fdc 0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}
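/*
** Illustration, not part of the driver: the asm above packs the two
** PDIR words exactly as the table documents; this is the same packing
** in portable C, so the layout can be read without PA-RISC bit
** notation. In conventional LSB==bit-0 terms, "Phys 0:3" are physical
** address bits 51:48, "Phys 4:19" are bits 47:32, and "Phys 20:39" are
** bits 31:12. The helper name and its pre-extracted virt_idx argument
** (the 12 bits the driver pulls out of the LCI result) are
** hypothetical.
*/
static u64
pdir_entry_sketch(u64 pa, u32 virt_idx, u32 hints)
{
	/* WORD 1: pa bits 31:12; the low 12 offset bits are clobbered
	** by the hint/valid bits, as "depw %1,31,12,%0" does above. */
	u32 word1 = ((u32)pa & ~0xfffU) | (hints & 0xfffU);

	/* WORD 0: Phys 0:3 | Virtual Index (12 bits) | Phys 4:19 */
	u32 word0 = (((u32)(pa >> 48) & 0xfU) << 28) |
		    ((virt_idx & 0xfffU) << 16) |
		    ((u32)(pa >> 32) & 0xffffU);

	return ((u64)word0 << 32) | word1;
}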
/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB.
 *
 * FIXME: Can we change the byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while(byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_hpa->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}

/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
 * TLB entries.
 *
 * FIXME: at some threshold it might be "cheaper" to just blow
 * away the entire I/O TLB instead of individual entries.
 *
 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
 * PDIR entry - just once for each possible TLB entry.
 * (We do need to mark I/O PDIR entries invalid regardless).
 *
 * FIXME: Can we change byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);

	while(byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		ASSERT(idx < (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only VALID bit */

		/*
		** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
		**   PCX-U/U+ do. (eg C200/C240)
		** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
		**
		** Hopefully someone figures out how to patch (NOP) the
		** FDC/SYNC out at boot time.
		*/
		asm volatile("fdc 0(%0)" : : "r" (pdir_ptr));	/* flush the entry itself, not the byte value */

		iovp += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm volatile("sync");
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}

/********************************************************************
** CCIO dma_ops
*******************************************************************/

/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The PCI device.
 * @mask: A bit mask describing the DMA address range of the device.
 *
 * This function implements the pci_dma_supported function.
 */
static int
ccio_dma_supported(struct pci_dev *dev, u64 mask)
{
	if(dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	dev->dma_mask = mask;	/* save it */

	/* only support 32-bit devices (ie PCI/GSC) */
	return (int)(mask == 0xffffffffUL);
}
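/*
** Illustration, not part of the driver: the "byte_cnt += chain_size;
** while(byte_cnt > chain_size)" idiom in ccio_clear_io_tlb() issues one
** CMD_TLB_PURGE per chain-sized block, including a partial trailing
** block; in other words, a ceiling division. The hypothetical helper
** below computes the same purge count directly.
*/
static unsigned int
tlb_purge_count_sketch(size_t byte_cnt, u32 chain_size)
{
	/* ceil(byte_cnt / chain_size); eg for chain_size == 4096,
	** byte_cnt == 4096 -> 1 purge, byte_cnt == 4097 -> 2 purges. */
	return (byte_cnt + chain_size - 1) / chain_size;
}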
/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The PCI device.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_single function.
 */
static dma_addr_t
ccio_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[direction];

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	ASSERT(size > 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ROUNDUP(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, (size >> IOVP_SHIFT));
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%x\n",
		__FUNCTION__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while(size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}

/**
 * ccio_unmap_single - Unmap an address range from the IOMMU.
 * @dev: The PCI device.
 * @iova: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_single function.
 */
static void
ccio_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, int direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}

/**
 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 *
 * This function implements the pci_alloc_consistent function.
 */
static void *
ccio_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma_handle)
{
	void *ret;

#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
	if(!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif

	ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
	}

	return ret;
}
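/*
** Illustration, not part of the driver: ccio_map_single() above keeps
** the sub-page offset and rounds offset + size up to whole I/O pages
** before allocating pdir entries. A hypothetical helper showing the
** same arithmetic, assuming the usual IOVP_SIZE == 4096 and
** IOVP_SHIFT == 12:
*/
static unsigned long
iovp_pages_needed_sketch(unsigned long addr, size_t size)
{
	unsigned long offset = addr & ~IOVP_MASK;	/* sub-page offset */

	/* ROUNDUP(size + offset, IOVP_SIZE) >> IOVP_SHIFT; eg a
	** 0x1400-byte buffer starting 0x100 into a page spans 0x1500
	** bytes of I/O space and needs two 4k I/O pages. */
	return (size + offset + IOVP_SIZE - 1) >> IOVP_SHIFT;
}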
/**
 * ccio_free_consistent - Free a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from ccio_alloc_consistent.
 * @dma_handle: The device address returned from ccio_alloc_consistent.
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free_consistent(struct pci_dev *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	ccio_unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

/**
 * ccio_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static CCIO_INLINE int
ccio_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
	       unsigned long hint)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = 0;
	unsigned long dma_offset = 0;

	dma_sg--;
	while (nents-- > 0) {
		int cnt = sg_dma_len(startsg);
		sg_dma_len(startsg) = 0;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg), cnt,
			   sg_virt_addr(startsg), startsg->length);
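/*
** Illustration, not part of the driver (the listing above is cut off
** mid-function): the PIDE_FLAG convention. Because pdir index 0 is
** legal, a zero sg_dma_address() cannot by itself mean "no entry"; the
** high bit tags an sg entry as holding a valid pdir index. The
** encode/decode helpers below are hypothetical, showing only the
** tagging idea, not the driver's exact field packing.
*/
static inline dma_addr_t pide_encode_sketch(unsigned int pide)
{
	return (dma_addr_t)pide | PIDE_FLAG;	/* tag as valid */
}

static inline int pide_valid_sketch(dma_addr_t d)
{
	return (d & PIDE_FLAG) != 0;		/* plain 0 still means "empty" */
}

static inline unsigned int pide_decode_sketch(dma_addr_t d)
{
	return (unsigned int)(d & ~PIDE_FLAG);	/* recover the index */
}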