ccio-dma.c
	);

		/*
		** Look for the start of a new DMA stream
		*/
		if(sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			sg_dma_address(startsg) = 0;
			dma_sg++;
			sg_dma_address(dma_sg) = pide;
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sg_virt_addr(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			sg_dma_len(dma_sg) += cnt;
			cnt += dma_offset;
			dma_offset = 0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, IOVP_SIZE);
#ifdef CONFIG_PROC_FS
			ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
			do {
				ccio_io_pdir_entry(pdirp, KERNEL_SPACE,
						   (void *)vaddr, hint);
				vaddr += IOVP_SIZE;
				cnt -= IOVP_SIZE;
				pdirp++;
			} while (cnt > 0);
		}
		startsg++;
	}
	return(n_mappings);
}

/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
static CCIO_INLINE int
ccio_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *vcontig_sg;	/* VCONTIG chunk head */
	unsigned long vcontig_len;	/* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;	/* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += (unsigned long) sg_virt_addr(startsg);
		dma_offset = (unsigned long) sg_virt_addr(startsg) & ~IOVP_MASK;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while(--nents > 0) {
			unsigned long startsg_end;

			startsg++;
			startsg_end = (unsigned long) sg_virt_addr(startsg) +
				      startsg->length;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if(ROUNDUP(dma_len + dma_offset + startsg->length,
				   IOVP_SIZE) > DMA_CHUNK_SIZE)
				break;

			/*
			** Append the next transaction?
			*/
			if(vcontig_end == (unsigned long) sg_virt_addr(startsg)) {
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo successive chunks
			** must start on page boundaries and dovetail
			** with their predecessor.
			*/
			sg_dma_len(vcontig_sg) = vcontig_len;
			vcontig_sg = startsg;
			vcontig_len = startsg->length;
			break;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(vcontig_sg) = vcontig_len;
		dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(dma_sg) =
			PIDE_FLAG
			| (ccio_alloc_range(ioc, (dma_len >> IOVP_SHIFT)) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
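
/*
** Illustration (not part of the driver): how ccio_coalesce_chunks() and
** ccio_fill_pdir() hand off through sg_dma_address().  The first pass
** encodes "pdir index << IOVP_SHIFT | page offset" with PIDE_FLAG set;
** the second pass strips the flag and uses the result both as the bus
** address and (shifted down) as the index of the first I/O pdir entry.
** A minimal user-space sketch of that arithmetic follows.  The EX_*
** constants are assumptions for the sketch only -- the real IOVP_SHIFT
** and PIDE_FLAG values are defined elsewhere in this file.
*/
#if 0
#include <stdio.h>

#define EX_IOVP_SHIFT	12			/* assumed 4k I/O pages */
#define EX_IOVP_MASK	(~((1UL << EX_IOVP_SHIFT) - 1))
#define EX_PIDE_FLAG	0x80000000UL		/* assumed flag bit */

int main(void)
{
	unsigned long pdir_index = 0x42;	/* as from ccio_alloc_range() */
	unsigned long dma_offset = 0x123;	/* vaddr & ~IOVP_MASK */

	/* first pass: what coalesce_chunks stores in sg_dma_address() */
	unsigned long encoded = EX_PIDE_FLAG
		| (pdir_index << EX_IOVP_SHIFT)
		| dma_offset;

	/* second pass: what fill_pdir recovers after stripping the flag */
	unsigned long pide = encoded & ~EX_PIDE_FLAG;

	printf("bus address      : 0x%lx\n", pide);		/* 0x42123 */
	printf("first pdir entry : %lu\n", pide >> EX_IOVP_SHIFT);
	printf("offset in page   : 0x%lx\n", pide & ~EX_IOVP_MASK);
	return 0;
}
#endif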
/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_sg function.
 */
static int
ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	    int direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[direction];

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

	/* Fast path single entry scatterlists. */
	if(nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
							 sg_virt_addr(sglist),
							 sglist->length,
							 direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CONFIG_PROC_FS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces the two-pass algorithm.
	*/
	coalesced = ccio_coalesce_chunks(ioc, sglist, nents);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = ccio_fill_pdir(ioc, sglist, nents, hint);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}
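
/*
** Illustration (not part of the driver): how a 2.4-era device driver
** would exercise this path through the generic pci_map_sg()/
** pci_unmap_sg() wrappers, which dispatch to ccio_map_sg()/
** ccio_unmap_sg() via the pci_dma_ops table registered below.  The
** example_sg_io() function and its scatterlist setup are hypothetical.
*/
#if 0
static void example_sg_io(struct pci_dev *dev,
			  struct scatterlist *sg, int nents)
{
	int i, n_dma;

	/* may return fewer entries than nents once chunks coalesce */
	n_dma = pci_map_sg(dev, sg, nents, PCI_DMA_TODEVICE);

	/* program the device with each coalesced DMA chunk */
	for (i = 0; i < n_dma; i++) {
		dma_addr_t addr = sg_dma_address(&sg[i]);
		unsigned int len = sg_dma_len(&sg[i]);
		/* ... hand addr/len to the device ... */
	}

	/* after the I/O completes, tear the mappings down; note that
	** the original nents is passed, not the coalesced count */
	pci_unmap_sg(dev, sg, nents, PCI_DMA_TODEVICE);
}
#endif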
/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_sg function.
 */
static void
ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
	      int direction)
{
	struct ioc *ioc;

	ASSERT(dev);
	ASSERT(dev->sysdata);
	ASSERT(HBA_DATA(dev->sysdata)->iommu);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

#ifdef CONFIG_PROC_FS
	ioc->usg_calls++;
#endif

	while(sg_dma_len(sglist) && nents--) {
#ifdef CONFIG_PROC_FS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_single(dev, sg_dma_address(sglist),
				  sg_dma_len(sglist), direction);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
}

static struct pci_dma_ops ccio_ops = {
	ccio_dma_supported,
	ccio_alloc_consistent,
	ccio_free_consistent,
	ccio_map_single,
	ccio_unmap_single,
	ccio_map_sg,
	ccio_unmap_sg,
	NULL,			/* dma_sync_single : NOP for U2/Uturn */
	NULL,			/* dma_sync_sg     : ditto */
};

#ifdef CONFIG_PROC_FS
static int proc_append(char *src, int len, char **dst, off_t *offset, int *max)
{
	if (len < *offset) {
		*offset -= len;
		return 0;
	}
	if (*offset > 0) {
		src += *offset;
		len -= *offset;
		*offset = 0;
	}
	if (len > *max) {
		len = *max;
	}
	memcpy(*dst, src, len);
	*dst += len;
	*max -= len;
	return (*max == 0);
}
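
/*
** Illustration (not part of the driver): proc_append() implements the
** usual read_proc windowing -- skip the first 'offset' bytes of the
** logical output, copy at most 'max' bytes into the caller's page, and
** return nonzero once the window is full so the caller can stop
** formatting.  A minimal user-space sketch of the same logic, with a
** hypothetical two-line output and window:
*/
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char page[16], *dst = page;
	long offset = 5;		/* caller already consumed 5 bytes */
	int max = sizeof(page);
	const char *lines[] = { "first line\n", "second line\n" };
	int i;

	for (i = 0; i < 2; i++) {
		const char *src = lines[i];
		int len = strlen(src);

		if (len < offset) {	/* window starts past this line */
			offset -= len;
			continue;
		}
		src += offset;		/* partial skip into this line */
		len -= offset;
		offset = 0;
		if (len > max)
			len = max;
		memcpy(dst, src, len);
		dst += len;
		max -= len;
		if (max == 0)		/* window full */
			break;
	}
	fwrite(page, 1, dst - page, stdout);	/* "t line\nsecond lin" */
	return 0;
}
#endif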
static int ccio_proc_info(char *buf, char **start, off_t offset, int count,
			  int *eof, void *data)
{
	int max = count;
	char tmp[80];		/* width of an ANSI-standard terminal */
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
		unsigned long avg = 0, min, max;
		int j, len;

		len = sprintf(tmp, "%s\n", ioc->name);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "Cujo 2.0 bug    : %s\n",
			      (ioc->cujo20_bug ? "yes" : "no"));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "IO PDIR size    : %d bytes (%d entries)\n",
			      total_pages * 8, total_pages);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "IO PDIR entries : %ld free %ld used (%d%%)\n",
			      total_pages - ioc->used_pages, ioc->used_pages,
			      (int)(ioc->used_pages * 100 / total_pages));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "Resource bitmap : %d bytes (%d pages)\n",
			      ioc->res_size, total_pages);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		min = max = ioc->avg_search[0];
		for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if(ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if(ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;

		len = sprintf(tmp, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			      min, avg, max);
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
			      ioc->msingle_calls, ioc->msingle_pages,
			      (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		/* KLUGE - unmap_sg calls unmap_single for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		len = sprintf(tmp, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
			      min, max, (int)((max * 1000)/min));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "pci_map_sg()    : %8ld calls %8ld pages (avg %d/1000)\n",
			      ioc->msg_calls, ioc->msg_pages,
			      (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		len = sprintf(tmp, "pci_unmap_sg()  : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
			      ioc->usg_calls, ioc->usg_pages,
			      (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
		if (proc_append(tmp, len, &buf, &offset, &count))
			break;

		ioc = ioc->next;
	}

	if (count == 0) {
		*eof = 1;
	}
	return (max - count);
}

static int ccio_resource_map(char *buf, char **start, off_t offset, int len,
			     int *eof, void *data)
{
	struct ioc *ioc = ioc_list;

	buf[0] = '\0';
	while (ioc != NULL) {
		u32 *res_ptr = (u32 *)ioc->res_map;
		int j;

		for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
			if ((j & 7) == 0)
				strcat(buf, "\n ");
			sprintf(buf, "%s %08x", buf, *res_ptr);
			res_ptr++;
		}
		strcat(buf, "\n\n");
		ioc = ioc->next;
		break;	/* XXX - remove me */
	}

	return strlen(buf);
}
#endif

/**
 * ccio_find_ioc - Find the ioc in the ioc_list
 * @hw_path: The hardware path of the ioc.