Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 68
1 file changed, 59 insertions, 9 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fed3385aeac0..9741cdeea9d7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -399,7 +399,7 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 	 * stream once the endpoint is on the HW schedule.
 	 */
 	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
-	    (ep_state & EP_HALTED))
+	    (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
 		return;
 	writel(DB_VALUE(ep_index, stream_id), db_addr);
 	/* The CPU has better things to do at this point than wait for a
@@ -433,6 +433,13 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 	}
 }
 
+void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+					 unsigned int slot_id,
+					 unsigned int ep_index)
+{
+	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+}
+
 /* Get the right ring for the given slot_id, ep_index and stream_id.
  * If the endpoint supports streams, boundary check the URB's stream ID.
  * If the endpoint doesn't support streams, return the singular endpoint ring.
@@ -656,6 +663,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	struct xhci_segment *seg = td->bounce_seg;
 	struct urb *urb = td->urb;
+	size_t len;
 
 	if (!ring || !seg || !urb)
 		return;
@@ -666,11 +674,14 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 		return;
 	}
 
-	/* for in tranfers we need to copy the data from bounce to sg */
-	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
-			     seg->bounce_len, seg->bounce_offs);
 	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
 			 DMA_FROM_DEVICE);
+	/* for in tranfers we need to copy the data from bounce to sg */
+	len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+				   seg->bounce_len, seg->bounce_offs);
+	if (len != seg->bounce_len)
+		xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+			  len, seg->bounce_len);
 	seg->bounce_len = 0;
 	seg->bounce_offs = 0;
 }
@@ -1608,8 +1619,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		usb_hcd_resume_root_hub(hcd);
 	}
 
-	if (hcd->speed >= HCD_USB3 && (portsc & PORT_PLS_MASK) == XDEV_INACTIVE)
+	if (hcd->speed >= HCD_USB3 &&
+	    (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
+		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
+		if (slot_id && xhci->devs[slot_id])
+			xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
 		bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
+	}
 
 	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
@@ -1790,6 +1806,23 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
 	return NULL;
 }
 
+static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
+				     struct xhci_virt_ep *ep)
+{
+	/*
+	 * As part of low/full-speed endpoint-halt processing
+	 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
+	 */
+	if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
+	    (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
+	    !(ep->ep_state & EP_CLEARING_TT)) {
+		ep->ep_state |= EP_CLEARING_TT;
+		td->urb->ep->hcpriv = td->urb->dev;
+		if (usb_hub_clear_tt_buffer(td->urb))
+			ep->ep_state &= ~EP_CLEARING_TT;
+	}
+}
+
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id, struct xhci_td *td,
@@ -1797,6 +1830,14 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	struct xhci_command *command;
+
+	/*
+	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
+	 * Device will be reset soon to recover the link so don't do anything
+	 */
+	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
+		return;
+
 	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
 	if (!command)
 		return;
@@ -1808,6 +1849,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 	if (reset_type == EP_HARD_RESET) {
 		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
 		xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td);
+		xhci_clear_hub_tt_buffer(xhci, td, ep);
 	}
 	xhci_ring_cmd_db(xhci);
 }
@@ -3127,6 +3169,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 	unsigned int unalign;
 	unsigned int max_pkt;
 	u32 new_buff_len;
+	size_t len;
 
 	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
 	unalign = (enqd_len + *trb_buff_len) % max_pkt;
@@ -3157,8 +3200,12 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 
 	/* create a max max_pkt sized bounce buffer pointed to by last trb */
 	if (usb_urb_dir_out(urb)) {
-		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+		len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
 				   seg->bounce_buf, new_buff_len, enqd_len);
+		if (len != seg->bounce_len)
+			xhci_warn(xhci,
+				"WARN Wrong bounce buffer write length: %zu != %d\n",
+				len, seg->bounce_len);
 		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
 						 max_pkt, DMA_TO_DEVICE);
 	} else {
@@ -3423,11 +3470,14 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	if (urb->transfer_buffer_length > 0) {
 		u32 length_field, remainder;
+		u64 addr;
 
 		if (xhci_urb_suitable_for_idt(urb)) {
-			memcpy(&urb->transfer_dma, urb->transfer_buffer,
+			memcpy(&addr, urb->transfer_buffer,
 			       urb->transfer_buffer_length);
 			field |= TRB_IDT;
+		} else {
+			addr = (u64) urb->transfer_dma;
 		}
 
 		remainder = xhci_td_remainder(xhci, 0,
@@ -3440,8 +3490,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
 		queue_trb(xhci, ep_ring, true,
-			  lower_32_bits(addr),
-			  upper_32_bits(addr),
 			  length_field, field | ep_ring->cycle_state);
 	}
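
Note on the doorbell changes above: EP_CLEARING_TT keeps the endpoint doorbell silent while a Clear_TT_Buffer request is outstanding on the upstream hub, and xhci_ring_doorbell_for_active_rings() is made non-static so code outside xhci-ring.c can restart the endpoint once that request completes. The completion side lives in xhci.c and is not part of this diff; the sketch below only illustrates how such a handler could use the new flag and wrapper. The function name xhci_clear_tt_buffer_complete and its hookup through struct hc_driver are assumptions here, not shown in this patch.

/*
 * Illustrative sketch (not part of this diff): hub-side completion hook
 * called after the external hub has finished clearing its TT buffer.
 * Assumes it is wired up via struct hc_driver on the xhci.c side.
 */
static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
					  struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	/* xhci_clear_hub_tt_buffer() stashed the usb_device in ep->hcpriv */
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	/* TT buffer is clean again: allow doorbells and restart the rings */
	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}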