?? davinci_mmc.c
字號:
dst_port = MMCSD_REGS_BASE_ADDR + 0x2C;	/* MMC data transmit register */
		mode_dst = INCR;
		fifo_width_dst = W8BIT;	/* don't-care: dst addressing mode is INCR */
		dst_bidx = 0;
		dst_cidx = 0;
		bcntrld = 8;
		sync_mode = ABSYNC;
	} else {
		/* Read: controller FIFO -> memory, paced by the MMC RX event */
		sync_dev = DAVINCI_DMA_MMCRXEVT;
		acnt = 4;	/* bytes per array */
		bcnt = 8;	/* arrays per frame (one 32-byte burst) */
		if (num_eight_words > MAX_C_CNT) {
			/* Larger than one PaRAM set can carry; the remainder
			 * is handled through channel linking further down. */
			temp_ccnt = MAX_C_CNT;
			ccnt = temp_ccnt;
		} else {
			ccnt = num_eight_words;
			temp_ccnt = ccnt;
		}
		src_port = MMCSD_REGS_BASE_ADDR + 0x28;	/* MMC data receive register */
		mode_src = INCR;
		fifo_width_src = W8BIT;
		src_bidx = 0;
		src_cidx = 0;
		dst_port = (unsigned int)virt_to_phys(req->data->req->buffer);
		mode_dst = INCR;
		fifo_width_dst = W8BIT;	/* don't-care: dst addressing mode is INCR */
		dst_bidx = 4;	/* 4-byte step between arrays */
		dst_cidx = 32;	/* 32-byte step between frames */
		bcntrld = 8;
		sync_mode = ABSYNC;
	}

	/* Program the EDMA PaRAM set of the event-synchronized channel. */
	davinci_set_dma_src_params(sync_dev, src_port, mode_src,
				   fifo_width_src);
	davinci_set_dma_dest_params(sync_dev, dst_port, mode_dst,
				    fifo_width_dst);
	davinci_set_dma_src_index(sync_dev, src_bidx, src_cidx);
	davinci_set_dma_dest_index(sync_dev, dst_bidx, dst_cidx);
	davinci_set_dma_transfer_params(sync_dev, acnt, bcnt, ccnt, bcntrld,
					sync_mode);

	host->edma_ch_details.cnt_chanel = 0;
	davinci_get_dma_params(sync_dev, &temp);

	/* Cache the channel OPT word per direction on the first transfer;
	 * restore the cached value on every later one. */
	if (sync_dev == DAVINCI_DMA_MMCTXEVT) {
		if (option_write == 0) {
			option_write = temp.opt;
		} else {
			temp.opt = option_write;
			davinci_set_dma_params(sync_dev, &temp);
		}
	}
	if (sync_dev == DAVINCI_DMA_MMCRXEVT) {
		if (option_read == 0) {
			option_read = temp.opt;
		} else {
			temp.opt = option_read;
			davinci_set_dma_params(sync_dev, &temp);
		}
	}

	if (num_eight_words > MAX_C_CNT) {
		/* Transfer exceeds one PaRAM set: build a chain of linked
		 * logical channels, each moving up to MAX_C_CNT frames.
		 * Only the final link keeps TCINTEN (completion interrupt)
		 * set. */
		davinci_get_dma_params(sync_dev, &temp);
		temp.opt &= ~TCINTEN;
		davinci_set_dma_params(sync_dev, &temp);
		for (i = 0; i < EDMA_MAX_LOGICAL_CHA_ALLOWED; i++) {
			if (i != 0) {
				/* The previous link is no longer last:
				 * clear its completion-interrupt enable. */
				j = i - 1;
				davinci_get_dma_params(host->edma_ch_details.
						       chanel_num[j], &temp);
				temp.opt &= ~TCINTEN;
				davinci_set_dma_params(host->edma_ch_details.
						       chanel_num[j], &temp);
			}
			host->edma_ch_details.cnt_chanel++;
			davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "LINK",
					    NULL, NULL, &edma_chan_num,
					    &sync_dev, queue_no);
			host->edma_ch_details.chanel_num[i] = edma_chan_num;
			ccnt = temp.ccnt & 0x0000FFFF;
			/* Advance the memory-side address past the region
			 * the previous set transfers. */
			if (sync_dev == DAVINCI_DMA_MMCTXEVT) {
				temp.src = temp.src + (acnt * bcnt * ccnt);
			} else {
				temp.dst = temp.dst + (acnt * bcnt * ccnt);
			}
			temp.opt |= TCINTEN;	/* tentatively the last link */
			if ((num_eight_words - temp_ccnt) > MAX_C_CNT) {
				temp.ccnt = (temp.ccnt & 0xFFFF0000) |
					    MAX_C_CNT;
				ccnt = temp.ccnt & 0x0000FFFF;
				temp_ccnt = temp_ccnt + ccnt;
			} else {
				temp.ccnt = (temp.ccnt & 0xFFFF0000) |
					    (num_eight_words - temp_ccnt);
				ccnt = temp.ccnt & 0x0000FFFF;
				temp_ccnt = temp_ccnt + ccnt;
			}
			davinci_set_dma_params(edma_chan_num, &temp);
			if (i != 0) {
				j = i - 1;
				davinci_dma_link_lch(host->edma_ch_details.
						     chanel_num[j],
						     edma_chan_num);
			}
			if (temp_ccnt == num_eight_words) {
				break;	/* whole transfer is now covered */
			}
		}
		davinci_dma_link_lch(sync_dev,
				     host->edma_ch_details.chanel_num[0]);
	}
	davinci_start_dma(sync_dev);
	return 0;
}

/*
 * mmc_davinci_prepare_data() - program block count/length, the data
 * timeout and the FIFO direction for the data phase of @req, then set
 * up the DMA transfer (falling back to PIO on failure).  A request with
 * no data phase just clears the block registers.
 */
static void mmc_davinci_prepare_data(struct mmc_davinci_host *host,
				     struct mmc_request *req)
{
	int timeout;
	host->data = req->data;
	if (req->data == NULL) {
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		mmcsd_regs->mmc_blen = 0;
		mmcsd_regs->mmc_nblk = 0;
		return;
	}
	dev_dbg(&mmc_dev, "MMCSD : Data xfer (%s %s), "
		"DTO %d cycles + %d ns, %d blocks of %d bytes\r\n",
		(req->data->flags & MMC_DATA_STREAM) ? "stream" : "block",
		(req->data->flags & MMC_DATA_WRITE) ? "write" : "read",
		req->data->timeout_clks, req->data->timeout_ns,
		req->data->blocks, 1 << req->data->blksz_bits);

	/* Convert ns to timeout-clock cycles.  NOTE(review): dividing by
	 * 500 corresponds to a 500 ns cycle (2 MHz), not the 20 MHz the
	 * original comment claimed -- confirm against the controller's
	 * timeout clock source. */
	timeout = req->data->timeout_clks + req->data->timeout_ns / 500;
	if (timeout > 0xffff) {
		timeout = 0xffff;	/* register field is 16 bits wide */
	}
	mmcsd_regs->mmc_tod = timeout;
	mmcsd_regs->mmc_nblk = req->data->blocks;
	mmcsd_regs->mmc_blen = (1 << req->data->blksz_bits);
	host->data_dir = (req->data->flags & MMC_DATA_WRITE) ?
DAVINCI_MMC_DATADIR_WRITE : DAVINCI_MMC_DATADIR_READ; /* Configure the FIFO */ switch (host->data_dir) { case DAVINCI_MMC_DATADIR_WRITE: mmcsd_regs->mmc_fifo_ctl = mmcsd_regs->mmc_fifo_ctl | 0x1; mmcsd_regs->mmc_fifo_ctl = 0x0; mmcsd_regs->mmc_fifo_ctl = mmcsd_regs->mmc_fifo_ctl | (1 << 1); mmcsd_regs->mmc_fifo_ctl = mmcsd_regs->mmc_fifo_ctl | (1 << 2); break; case DAVINCI_MMC_DATADIR_READ: mmcsd_regs->mmc_fifo_ctl = mmcsd_regs->mmc_fifo_ctl | 0x1; mmcsd_regs->mmc_fifo_ctl = 0x0; mmcsd_regs->mmc_fifo_ctl = mmcsd_regs->mmc_fifo_ctl | (1 << 2); break; default: break; } if ((host->use_dma == 1) && (mmc_davinci_start_dma_transfer(host, req) == 0)) { host->buffer = NULL; host->bytes_left = 0; } else { /* Revert to CPU Copy */ host->buffer = (u32 *) (req->data->req->buffer); host->bytes_left = req->data->blocks * (1 << req->data->blksz_bits); host->use_dma = 0; }}static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req){ struct mmc_davinci_host *host = mmc_priv(mmc); unsigned long flags; if (!is_card_detect_progress) { spin_lock_irqsave(&mmc_lock, flags); is_card_busy = 1; spin_unlock_irqrestore(&mmc_lock, flags); mmc_davinci_prepare_data(host, req); mmc_davinci_start_command(host, req->cmd); } else { /* Queue up the request as card dectection is being excuted */ que_mmc_host = mmc; que_mmc_request = req; spin_lock_irqsave(&mmc_lock, flags); is_req_queued_up = 1; spin_unlock_irqrestore(&mmc_lock, flags); }}static unsigned int calculate_freq_for_card(unsigned int mmc_req_freq){ unsigned int mmc_freq = 0, cpu_arm_clk = 0, mmc_push_pull = 0; cpu_arm_clk = mmc_input_clk; if (cpu_arm_clk > (2 * mmc_req_freq)) { mmc_push_pull = ((unsigned int)cpu_arm_clk / (2 * mmc_req_freq)) - 1; } else { mmc_push_pull = 0; } mmc_freq = (unsigned int)cpu_arm_clk / (2 * (mmc_push_pull + 1)); if (mmc_freq > mmc_req_freq) { mmc_push_pull = mmc_push_pull + 1; } return mmc_push_pull;}static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios){ unsigned short 
status;
	unsigned int open_drain_freq = 0, cpu_arm_clk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);
	cpu_arm_clk = mmc_input_clk;
	dev_dbg(&mmc_dev,
		"clock %dHz busmode %d powermode %d Vdd %d.%02d\r\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd / 100, ios->vdd % 100);

	/* Program the 8-bit clock divider: fixed initialization rate in
	 * open-drain (card identification) mode, requested rate otherwise. */
	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		open_drain_freq =
		    ((unsigned int)cpu_arm_clk / (2 * MMCSD_INIT_CLOCK)) - 1;
		mmcsd_regs->mmc_clk =
		    (mmcsd_regs->mmc_clk & ~(0xFF)) | open_drain_freq;
	} else {
		mmc_push_pull_freq = calculate_freq_for_card(ios->clock);
		mmcsd_regs->mmc_clk =
		    (mmcsd_regs->mmc_clk & ~(0xFF)) | mmc_push_pull_freq;
	}
	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		/* Send initialization clock cycles and poll for command
		 * completion.  NOTE(review): this busy-waits with no
		 * timeout and hangs if EOFCMD never asserts -- consider
		 * bounding the loop. */
		mmcsd_regs->mmc_arghl = 0x0;
		mmcsd_regs->mmc_cmd = 0x4000;
		status = 0;
		while (!(status & (MMCSD_EVENT_EOFCMD))) {
			status = mmcsd_regs->mmc_st0;
		}
	}
}

/*
 * mmc_davinci_xfer_done() - finish the data phase of a request: account
 * the transferred bytes, then either complete the request or issue the
 * pending stop command.
 */
static void mmc_davinci_xfer_done(struct mmc_davinci_host *host,
				  struct mmc_data *data)
{
	unsigned long flags;
	host->data = NULL;
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;
	if (data->error == MMC_ERR_NONE)
		data->bytes_xfered += data->blocks * (1 << data->blksz_bits);
	if (data->error == MMC_ERR_TIMEOUT) {
		spin_lock_irqsave(&mmc_lock, flags);
		is_card_busy = 0;
		spin_unlock_irqrestore(&mmc_lock, flags);
		mmc_request_done(host->mmc, data->mrq);
		return;
	}
	if (!data->stop) {
		/* No stop command pending: the request is complete. */
		host->req = NULL;
		spin_lock_irqsave(&mmc_lock, flags);
		is_card_busy = 0;
		spin_unlock_irqrestore(&mmc_lock, flags);
		mmc_request_done(host->mmc, data->mrq);
		return;
	}
	mmc_davinci_start_command(host, data->stop);
}

/*
 * mmc_davinci_cmd_done() - copy the controller's response registers
 * into @cmd according to the expected response type and, when no data
 * phase follows (or the command failed), complete the request.
 */
static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	unsigned long flags;
	host->cmd = NULL;
	switch (cmd->flags & MMC_RSP_MASK) {
	case MMC_RSP_NONE:
		/* resp 0: no response expected */
		break;
	case MMC_RSP_SHORT:
		/* response types 1, 1b, 3, 4, 5, 6: single 32-bit word */
		cmd->resp[0] = mmcsd_regs->mmc_rsp67;
		break;
	case MMC_RSP_LONG:
		/* response type 2: 128-bit response across four registers */
		cmd->resp[3] = mmcsd_regs->mmc_rsp01;
cmd->resp[2] = mmcsd_regs->mmc_rsp23;
		cmd->resp[1] = mmcsd_regs->mmc_rsp45;
		cmd->resp[0] = mmcsd_regs->mmc_rsp67;
		break;
	}
	if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
		/* No data phase follows, or the command failed: the
		 * request ends here. */
		host->req = NULL;
		if (cmd->error == MMC_ERR_TIMEOUT) {
			/* A timed-out command is treated as final. */
			cmd->mrq->cmd->retries = 0;
		}
		spin_lock_irqsave(&mmc_lock, flags);
		is_card_busy = 0;
		spin_unlock_irqrestore(&mmc_lock, flags);
		mmc_request_done(host->mmc, cmd->mrq);
	}
}

/*
 * mmc_davinci_irq() - MMCSD controller interrupt handler.  Services the
 * FIFO read/write threshold events for PIO transfers and the
 * block-transferred / error status events.
 */
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	u16 status;
	int end_command;
	int end_transfer;
	int byte_cnt = 0, i = 0;
	unsigned long flags;

	if (host->is_core_command) {
		if (host->cmd == NULL && host->data == NULL) {
			/* Nothing outstanding: spurious interrupt. */
			status = mmcsd_regs->mmc_st0;
			dev_dbg(&mmc_dev, "Spurious interrupt 0x%04x\r\n",
				status);
			/* Disable the interrupt from mmcsd */
			mmcsd_regs->mmc_im = 0;
			return IRQ_HANDLED;
		}
	}
	end_command = 0;
	end_transfer = 0;
	status = mmcsd_regs->mmc_st0;
	if (status == 0) {
		return IRQ_HANDLED;
	}
	if (host->is_core_command) {
		if (is_card_initialized) {
			if (new_card_state == 0) {
				/* Card has been removed: fail any
				 * outstanding command with a timeout. */
				if (host->cmd) {
					host->cmd->error |= MMC_ERR_TIMEOUT;
					mmc_davinci_cmd_done(host, host->cmd);
				}
				dev_dbg(&mmc_dev,
					"From code segment excuted when card removed\n");
				return IRQ_HANDLED;
			}
		}
		/* Drain all status events reported in this interrupt. */
		while (status != 0) {
			if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
				if (status & MMCSD_EVENT_WRITE) {
					/* TX FIFO below threshold: feed it
					 * rw_threshold more bytes (PIO). */
					if (host->bytes_left > 0) {
						byte_cnt =
						    mmcsd_cfg.rw_threshold;
						host->bytes_left -=
						    mmcsd_cfg.rw_threshold;
						for (i = 0; i < (byte_cnt / 4);
						     i++) {
							mmcsd_regs->mmc_dxr =
							    *host->buffer;
							host->buffer++;
						}
					}
				}
			}
			if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
				if (status & MMCSD_EVENT_READ) {
					/* RX FIFO above threshold: drain
					 * rw_threshold bytes (PIO). */
					if (host->bytes_left > 0) {
						byte_cnt =
						    mmcsd_cfg.rw_threshold;
						host->bytes_left -=
						    mmcsd_cfg.rw_threshold;
						for (i = 0; i < (byte_cnt / 4);
						     i++) {
							*host->buffer =
							    mmcsd_regs->mmc_drr;
							host->buffer++;
						}
					}
				}
			}
			if (status & MMCSD_EVENT_BLOCK_XFERRED) {
				/* Block sent/received */
				if (host->data != NULL) {
					end_transfer = 1;
				}
			}
			if (status & MMCSD_EVENT_ERROR_DATATIMEOUT) {
				/* Data timeout: mark the card as gone. */
				if ((host->data) && (new_card_state != 0)) {
					host->data->error |= MMC_ERR_TIMEOUT;
					spin_lock_irqsave(&mmc_lock, flags);
					new_card_state = 0;
					is_card_initialized = 0;
					spin_unlock_irqrestore(&mmc_lock,
?? 快捷鍵說明
復(fù)制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -