ehci-q.c (excerpt)
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32 (info1);
	qh->hw_info2 = cpu_to_le32 (info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}

/*-------------------------------------------------------------------------*/

/* move qh (and its qtds) onto async queue; maybe enable queue.  */

static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	__le32		dma = QH_NEXT (qh->qh_dma);
	struct ehci_qh	*head;

	/* (re)start the async schedule? */
	head = ehci->async;
	timer_action_done (ehci, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32	cmd = ehci_readl(ehci, &ehci->regs->command);

		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(ehci, &ehci->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			ehci_writel(ehci, cmd, &ehci->regs->command);
			ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}

	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh (ehci, qh);

	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	wmb ();

	head->qh_next.qh = qh;
	head->hw_next = dma;

	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}

/*-------------------------------------------------------------------------*/

#define	QH_ADDR_MASK	__constant_cpu_to_le32(0x7f)

/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			epnum,
	void			**ptr
)
{
	struct ehci_qh		*qh = NULL;

	qh = (struct ehci_qh *) *ptr;
	if (unlikely (qh == NULL)) {
		/* can't sleep here, we have ehci->lock... */
		qh = qh_make (ehci, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely (qh != NULL)) {
		struct ehci_qtd	*qtd;

		if (unlikely (list_empty (qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry (qtd_list->next, struct ehci_qtd,
					qtd_list);

		/* control qh may need patching ... */
		if (unlikely (epnum == 0)) {

			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice (urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}

		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely (qtd != NULL)) {
			struct ehci_qtd		*dummy;
			dma_addr_t		dma;
			__le32			token;

			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy).  both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			qtd->hw_token = HALT_BIT;
			wmb ();
			dummy = qh->dummy;

			dma = dummy->qtd_dma;
			*dummy = *qtd;
			dummy->qtd_dma = dma;

			list_del (&qtd->qtd_list);
			list_add (&dummy->qtd_list, qtd_list);
			__list_splice (qtd_list, qh->qtd_list.prev);

			ehci_qtd_init (qtd, qtd->qtd_dma);
			qh->dummy = qtd;

			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry (qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT (dma);

			/* let the hc process these next qtds */
			wmb ();
			dummy->hw_token = token;

			urb->hcpriv = qh_get (qh);
		}
	}
	return qh;
}

/*-------------------------------------------------------------------------*/

static int
submit_async (
	struct ehci_hcd		*ehci,
	struct usb_host_endpoint *ep,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	struct ehci_qtd		*qtd;
	int			epnum;
	unsigned long		flags;
	struct ehci_qh		*qh = NULL;
	int			rc = 0;

	qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = ep->desc.bEndpointAddress;

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, ep->hcpriv);
#endif

	spin_lock_irqsave (&ehci->lock, flags);
	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			&ehci_to_hcd(ehci)->flags))) {
		rc = -ESHUTDOWN;
		goto done;
	}

	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
	if (unlikely(qh == NULL)) {
		rc = -ENOMEM;
		goto done;
	}

	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely (qh->qh_state == QH_STATE_IDLE))
		qh_link_async (ehci, qh_get (qh));
 done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (unlikely (qh == NULL))
		qtd_list_free (ehci, urb, qtd_list);
	return rc;
}

/*-------------------------------------------------------------------------*/

/* the async qh for the qtds being reclaimed are now unlinked from the HC */

static void end_unlink_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh = ehci->reclaim;
	struct ehci_qh		*next;

	timer_action_done (ehci, TIMER_IAA_WATCHDOG);

	// qh->hw_next = cpu_to_le32 (qh->qh_dma);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put (qh);			// refcount from reclaim

	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	ehci->reclaim = next;
	ehci->reclaim_ready = 0;
	qh->reclaim = NULL;

	qh_completions (ehci, qh);

	if (!list_empty (&qh->qtd_list)
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		qh_link_async (ehci, qh);
	else {
		qh_put (qh);		// refcount from async list

		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
				&& ehci->async->qh_next.qh == NULL)
			timer_action (ehci, TIMER_ASYNC_OFF);
	}

	if (next) {
		ehci->reclaim = NULL;
		start_unlink_async (ehci, next);
	}
}

/* makes sure the async qh will become idle */
/* caller must own ehci->lock */

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		cmd = ehci_readl(ehci, &ehci->regs->command);
	struct ehci_qh	*prev;

#ifdef DEBUG
	assert_spin_locked(&ehci->lock);
	if (ehci->reclaim
			|| (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT)
			)
		BUG ();
#endif

	/* stop async schedule right now? */
	if (unlikely (qh == ehci->async)) {
		/* can't get here without STS_ASS set */
		if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
				&& !ehci->reclaim) {
			/* ... and CMD_IAAD clear */
			ehci_writel(ehci, cmd & ~CMD_ASE,
				&ehci->regs->command);
			wmb ();
			// handshake later, if we need to
			timer_action_done (ehci, TIMER_ASYNC_OFF);
		}
		return;
	}

	qh->qh_state = QH_STATE_UNLINK;
	ehci->reclaim = qh = qh_get (qh);

	prev = ehci->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;

	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb ();

	if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
		/* if (unlikely (qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async (ehci);
		return;
	}

	ehci->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	(void)ehci_readl(ehci, &ehci->regs->command);
	timer_action (ehci, TIMER_IAA_WATCHDOG);
}

/*-------------------------------------------------------------------------*/

static void scan_async (struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;
	enum ehci_timer_action	action = TIMER_IO_WATCHDOG;

	if (!++(ehci->stamp))
		ehci->stamp++;
	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
	qh = ehci->async->qh_next.qh;
	if (likely (qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty (&qh->qtd_list)
					&& qh->stamp != ehci->stamp) {
				int temp;

				/* unlinks could happen here; completion
				 * reporting drops the lock.  rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get (qh);
				qh->stamp = ehci->stamp;
				temp = qh_completions (ehci, qh);
				qh_put (qh);
				if (temp != 0) {
					goto rescan;
				}
			}

			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs.  delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty (&qh->qtd_list)) {
				if (qh->stamp == ehci->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!ehci->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async (ehci, qh);
			}

			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action (ehci, TIMER_ASYNC_SHRINK);
}
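For orientation: the functions above are reached from the HCD's enqueue path. In this kernel generation, ehci_urb_enqueue() in ehci-hcd.c builds a qtd chain with qh_urb_transaction() (defined earlier in this file) and hands it to submit_async() for control and bulk pipes. The following is a simplified sketch of that dispatch, not the verbatim driver code; the name ehci_urb_enqueue_sketch and the trimmed switch body are illustrative assumptions.

/* Illustrative sketch only: how an urb reaches submit_async().
 * Assumes the pre-2.6.24 enqueue signature that still carries the
 * usb_host_endpoint pointer; periodic transfers are stubbed out.
 */
static int ehci_urb_enqueue_sketch (
	struct usb_hcd			*hcd,
	struct usb_host_endpoint	*ep,
	struct urb			*urb,
	gfp_t				mem_flags
) {
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		/* turn the urb into a chain of qtds ... */
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		/* ... then splice them onto the endpoint's async qh */
		return submit_async (ehci, ep, urb, &qtd_list, mem_flags);

	default:
		/* interrupt and iso urbs take the periodic-schedule
		 * paths in ehci-sched.c, not shown in this sketch */
		return -ENOSYS;
	}
}

Note the design point visible in qh_append_tds(): the driver never writes into a qh overlay the controller may be traversing. Instead it swaps the queue's dummy qtd with the first new qtd and only activates it at the very end (the old dummy's token write after the wmb()), per EHCI spec section 4.10.2.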