usb: xhci: Acknowledge pending events in secondary event ring
As part of bus suspend, if the xHC is halted while there are pending events in the event ring, the xHC stops responding to xHCI commands upon a host- or device-initiated bus resume. Fix this issue by walking all the pending events and writing the address of the xHC event ring enqueue pointer to the ERDP register. This indicates to the xHC that system software has processed all events in the ring. Change-Id: I0c1fafb03c9848d043599fcee19e9bf07c3a3acd Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
This commit is contained in:
parent
368fecd7df
commit
13931636c5
2 changed files with 78 additions and 26 deletions
|
@ -1786,11 +1786,84 @@ void xhci_free_command(struct xhci_hcd *xhci,
|
|||
kfree(command);
|
||||
}
|
||||
|
||||
void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
|
||||
{
|
||||
union xhci_trb *erdp_trb, *current_trb;
|
||||
struct xhci_segment *seg;
|
||||
u64 erdp_reg;
|
||||
u32 iman_reg;
|
||||
dma_addr_t deq;
|
||||
unsigned long segment_offset;
|
||||
|
||||
/* disable irq, ack pending interrupt and ack all pending events */
|
||||
|
||||
iman_reg =
|
||||
readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
iman_reg &= ~IMAN_IE;
|
||||
writel_relaxed(iman_reg,
|
||||
&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
iman_reg =
|
||||
readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
if (iman_reg & IMAN_IP)
|
||||
writel_relaxed(iman_reg,
|
||||
&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
|
||||
/* last acked event trb is in erdp reg */
|
||||
erdp_reg =
|
||||
xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
|
||||
deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
|
||||
if (!deq) {
|
||||
pr_debug("%s: event ring handling not required\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
seg = xhci->sec_event_ring[intr_num]->first_seg;
|
||||
segment_offset = deq - seg->dma;
|
||||
|
||||
/* find out virtual address of the last acked event trb */
|
||||
erdp_trb = current_trb = &seg->trbs[0] +
|
||||
(segment_offset/sizeof(*current_trb));
|
||||
|
||||
/* read cycle state of the last acked trb to find out CCS */
|
||||
xhci->sec_event_ring[intr_num]->cycle_state =
|
||||
(current_trb->event_cmd.flags & TRB_CYCLE);
|
||||
|
||||
while (1) {
|
||||
/* last trb of the event ring: toggle cycle state */
|
||||
if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
|
||||
xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
|
||||
current_trb = &seg->trbs[0];
|
||||
} else {
|
||||
current_trb++;
|
||||
}
|
||||
|
||||
/* cycle state transition */
|
||||
if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
|
||||
xhci->sec_event_ring[intr_num]->cycle_state)
|
||||
break;
|
||||
}
|
||||
|
||||
if (erdp_trb != current_trb) {
|
||||
deq =
|
||||
xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
|
||||
current_trb);
|
||||
if (deq == 0)
|
||||
xhci_warn(xhci,
|
||||
"WARN ivalid SW event ring dequeue ptr.\n");
|
||||
/* Update HC event ring dequeue pointer */
|
||||
erdp_reg &= ERST_PTR_MASK;
|
||||
erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
|
||||
}
|
||||
|
||||
/* Clear the event handler busy flag (RW1C); event ring is empty. */
|
||||
erdp_reg |= ERST_EHB;
|
||||
xhci_write_64(xhci, erdp_reg,
|
||||
&xhci->sec_ir_set[intr_num]->erst_dequeue);
|
||||
}
|
||||
|
||||
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
|
||||
{
|
||||
int size;
|
||||
u32 iman_reg;
|
||||
u64 erdp_reg;
|
||||
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
|
||||
struct device *dev = xhci_to_hcd(xhci)->self.controller;
|
||||
|
||||
|
@ -1803,28 +1876,7 @@ int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
|
|||
size =
|
||||
sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
|
||||
if (xhci->sec_erst[intr_num].entries) {
|
||||
/*
|
||||
* disable irq, ack pending interrupt and clear EHB for xHC to
|
||||
* generate interrupt again when new event ring is setup
|
||||
*/
|
||||
iman_reg =
|
||||
readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
iman_reg &= ~IMAN_IE;
|
||||
writel_relaxed(iman_reg,
|
||||
&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
iman_reg =
|
||||
readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
if (iman_reg & IMAN_IP)
|
||||
writel_relaxed(iman_reg,
|
||||
&xhci->sec_ir_set[intr_num]->irq_pending);
|
||||
/* make sure IP gets cleared before clearing EHB */
|
||||
mb();
|
||||
|
||||
erdp_reg = xhci_read_64(xhci,
|
||||
&xhci->sec_ir_set[intr_num]->erst_dequeue);
|
||||
xhci_write_64(xhci, erdp_reg | ERST_EHB,
|
||||
&xhci->sec_ir_set[intr_num]->erst_dequeue);
|
||||
|
||||
xhci_handle_sec_intr_events(xhci, intr_num);
|
||||
dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
|
||||
xhci->sec_erst[intr_num].erst_dma_addr);
|
||||
xhci->sec_erst[intr_num].entries = NULL;
|
||||
|
|
|
@ -336,7 +336,7 @@ static int xhci_plat_runtime_suspend(struct device *dev)
|
|||
|
||||
dev_dbg(dev, "xhci-plat runtime suspend\n");
|
||||
|
||||
return 0;
|
||||
return xhci_suspend(xhci, true);
|
||||
}
|
||||
|
||||
static int xhci_plat_runtime_resume(struct device *dev)
|
||||
|
@ -350,7 +350,7 @@ static int xhci_plat_runtime_resume(struct device *dev)
|
|||
|
||||
dev_dbg(dev, "xhci-plat runtime resume\n");
|
||||
|
||||
ret = 0;
|
||||
ret = xhci_resume(xhci, false);
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
|
||||
return ret;
|
||||
|
|
Loading…
Add table
Reference in a new issue