Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/class/cdc-wdm.c          |   2
-rw-r--r--  drivers/usb/core/hub.c               |  18
-rw-r--r--  drivers/usb/host/ehci-dbg.c          |  24
-rw-r--r--  drivers/usb/host/ehci-hcd.c          | 415
-rw-r--r--  drivers/usb/host/ehci-hub.c          |  97
-rw-r--r--  drivers/usb/host/ehci-mem.c          |  25
-rw-r--r--  drivers/usb/host/ehci-pci.c          |   4
-rw-r--r--  drivers/usb/host/ehci-q.c            | 311
-rw-r--r--  drivers/usb/host/ehci-sched.c        | 552
-rw-r--r--  drivers/usb/host/ehci-tegra.c        |   5
-rw-r--r--  drivers/usb/host/ehci-timer.c        | 401
-rw-r--r--  drivers/usb/host/ehci.h              | 134
-rw-r--r--  drivers/usb/host/xhci-hub.c          |  44
-rw-r--r--  drivers/usb/host/xhci-ring.c         |  11
-rw-r--r--  drivers/usb/host/xhci.h              |   6
-rw-r--r--  drivers/usb/serial/metro-usb.c       |   8
-rw-r--r--  drivers/usb/serial/option.c          |  26
-rw-r--r--  drivers/usb/storage/scsiglue.c       |   5
-rw-r--r--  drivers/usb/storage/unusual_devs.h   |  12
-rw-r--r--  drivers/usb/storage/usb.c            |   5
20 files changed, 1174 insertions, 931 deletions
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 25e7d72f339e..65a55abb791f 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -475,6 +475,8 @@ retry:
goto retry;
}
if (!desc->reslength) { /* zero length read */
+ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+ clear_bit(WDM_READ, &desc->flags);
spin_unlock_irq(&desc->iuspin);
goto retry;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d739f966b5a8..3febe54883bd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2327,12 +2327,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * Port warm reset is required to recover
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
{
return hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_SS_INACTIVE;
+ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_INACTIVE) ||
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_COMP_MOD));
}
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2368,7 +2372,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
*
* See https://bugzilla.kernel.org/show_bug.cgi?id=41752
*/
- if (hub_port_inactive(hub, portstatus)) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
int ret;
if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -4471,9 +4475,7 @@ static void hub_events(void)
/* Warm reset a USB3 protocol port if it's in
* SS.Inactive state.
*/
- if (hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE)
- == USB_SS_PORT_LS_SS_INACTIVE) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
dev_dbg(hub_dev, "warm reset port %d\n", i);
hub_port_reset(hub, i, NULL,
HUB_BH_RESET_TIME, true);
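
Note on the hub.c hunks above: the old check only caught SS.Inactive, while the new hub_port_warm_reset_required() also treats Compliance Mode as a link state that needs a warm reset, and hub_events() now reuses the same helper. A minimal standalone sketch of the predicate (outside the kernel, with the hub_is_superspeed() check omitted) could look like the following; the constants mirror the USB 3.0 port-status layout but are listed here only for illustration.

/*
 * Standalone sketch of the new predicate; not the kernel function.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_STAT_LINK_STATE	0x01e0	/* bits 5..8 of wPortStatus */
#define PORT_LS_SS_INACTIVE	0x00c0	/* link state 6: SS.Inactive */
#define PORT_LS_COMP_MOD	0x0140	/* link state 10: Compliance Mode */

static bool warm_reset_required(uint16_t portstatus)
{
	uint16_t ls = portstatus & PORT_STAT_LINK_STATE;

	return ls == PORT_LS_SS_INACTIVE || ls == PORT_LS_COMP_MOD;
}

int main(void)
{
	printf("%d %d\n", warm_reset_required(0x00c0),	/* 1 */
			  warm_reset_required(0x0000));	/* 0 */
	return 0;
}
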
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 7561966fbdc4..f0c00de035ef 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -404,9 +404,9 @@ struct debug_buffer {
#define speed_char(info1) ({ char tmp; \
switch (info1 & (3 << 12)) { \
- case 0 << 12: tmp = 'f'; break; \
- case 1 << 12: tmp = 'l'; break; \
- case 2 << 12: tmp = 'h'; break; \
+ case QH_FULL_SPEED: tmp = 'f'; break; \
+ case QH_LOW_SPEED: tmp = 'l'; break; \
+ case QH_HIGH_SPEED: tmp = 'h'; break; \
default: tmp = '?'; break; \
}; tmp; })
@@ -538,12 +538,13 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
spin_lock_irqsave (&ehci->lock, flags);
for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
qh_lines (ehci, qh, &next, &size);
- if (ehci->reclaim && size > 0) {
- temp = scnprintf (next, size, "\nreclaim =\n");
+ if (ehci->async_unlink && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
size -= temp;
next += temp;
- for (qh = ehci->reclaim; size > 0 && qh; qh = qh->reclaim)
+ for (qh = ehci->async_unlink; size > 0 && qh;
+ qh = qh->unlink_next)
qh_lines (ehci, qh, &next, &size);
}
spin_unlock_irqrestore (&ehci->lock, flags);
@@ -705,6 +706,8 @@ static const char *rh_state_string(struct ehci_hcd *ehci)
return "suspended";
case EHCI_RH_RUNNING:
return "running";
+ case EHCI_RH_STOPPING:
+ return "stopping";
}
return "?";
}
@@ -841,16 +844,17 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
}
- if (ehci->reclaim) {
- temp = scnprintf(next, size, "reclaim qh %p\n", ehci->reclaim);
+ if (ehci->async_unlink) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ ehci->async_unlink);
size -= temp;
next += temp;
}
#ifdef EHCI_STATS
temp = scnprintf (next, size,
- "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+ "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
ehci->stats.lost_iaa);
size -= temp;
next += temp;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index f9a783bfa1fe..e44ca5453aa2 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -30,8 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/ktime.h>
+#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
@@ -94,12 +93,6 @@ static const char hcd_name [] = "ehci_hcd";
*/
#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
-#define EHCI_IAA_MSECS 10 /* arbitrary */
-#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
-#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
-#define EHCI_SHRINK_JIFFIES (DIV_ROUND_UP(HZ, 200) + 1)
- /* 5-ms async qh unlink delay */
-
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0; // 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
@@ -130,41 +123,6 @@ MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
/*-------------------------------------------------------------------------*/
-static void
-timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
-{
- /* Don't override timeouts which shrink or (later) disable
- * the async ring; just the I/O watchdog. Note that if a
- * SHRINK were pending, OFF would never be requested.
- */
- if (timer_pending(&ehci->watchdog)
- && ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
- & ehci->actions))
- return;
-
- if (!test_and_set_bit(action, &ehci->actions)) {
- unsigned long t;
-
- switch (action) {
- case TIMER_IO_WATCHDOG:
- if (!ehci->need_io_watchdog)
- return;
- t = EHCI_IO_JIFFIES;
- break;
- case TIMER_ASYNC_OFF:
- t = EHCI_ASYNC_JIFFIES;
- break;
- /* case TIMER_ASYNC_SHRINK: */
- default:
- t = EHCI_SHRINK_JIFFIES;
- break;
- }
- mod_timer(&ehci->watchdog, t + jiffies);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-
/*
* handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
@@ -209,21 +167,24 @@ static int tdi_in_host_mode (struct ehci_hcd *ehci)
return (tmp & 3) == USBMODE_CM_HC;
}
-/* force HC to halt state from unknown (EHCI spec section 2.3) */
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
static int ehci_halt (struct ehci_hcd *ehci)
{
- u32 temp = ehci_readl(ehci, &ehci->regs->status);
+ u32 temp;
+
+ spin_lock_irq(&ehci->lock);
/* disable any irqs left enabled by previous code */
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- if (ehci_is_TDI(ehci) && tdi_in_host_mode(ehci) == 0) {
+ if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) {
+ spin_unlock_irq(&ehci->lock);
return 0;
}
- if ((temp & STS_HALT) != 0)
- return 0;
-
/*
* This routine gets called during probe before ehci->command
* has been initialized, so we can't rely on its value.
@@ -232,70 +193,12 @@ static int ehci_halt (struct ehci_hcd *ehci)
temp = ehci_readl(ehci, &ehci->regs->command);
temp &= ~(CMD_RUN | CMD_IAAD);
ehci_writel(ehci, temp, &ehci->regs->command);
- return handshake (ehci, &ehci->regs->status,
- STS_HALT, STS_HALT, 16 * 125);
-}
-
-#if defined(CONFIG_USB_SUSPEND) && defined(CONFIG_PPC_PS3)
-/*
- * The EHCI controller of the Cell Super Companion Chip used in the
- * PS3 will stop the root hub after all root hub ports are suspended.
- * When in this condition handshake will return -ETIMEDOUT. The
- * STS_HLT bit will not be set, so inspection of the frame index is
- * used here to test for the condition. If the condition is found
- * return success to allow the USB suspend to complete.
- */
-
-static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
- void __iomem *ptr, u32 mask, u32 done,
- int usec)
-{
- unsigned int old_index;
- int error;
-
- if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
- return -ETIMEDOUT;
-
- old_index = ehci_read_frame_index(ehci);
-
- error = handshake(ehci, ptr, mask, done, usec);
-
- if (error == -ETIMEDOUT && ehci_read_frame_index(ehci) == old_index)
- return 0;
-
- return error;
-}
-
-#else
-
-static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
- void __iomem *ptr, u32 mask, u32 done,
- int usec)
-{
- return -ETIMEDOUT;
-}
-
-#endif
-
-static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
- u32 mask, u32 done, int usec)
-{
- int error;
-
- error = handshake(ehci, ptr, mask, done, usec);
- if (error == -ETIMEDOUT)
- error = handshake_for_broken_root_hub(ehci, ptr, mask, done,
- usec);
-
- if (error) {
- ehci_halt(ehci);
- ehci->rh_state = EHCI_RH_HALTED;
- ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
- ptr, mask, done, error);
- }
+ spin_unlock_irq(&ehci->lock);
+ synchronize_irq(ehci_to_hcd(ehci)->irq);
- return error;
+ return handshake(ehci, &ehci->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
}
/* put TDI/ARC silicon into EHCI mode */
@@ -314,7 +217,10 @@ static void tdi_reset (struct ehci_hcd *ehci)
ehci_writel(ehci, tmp, &ehci->regs->usbmode);
}
-/* reset a non-running (STS_HALT == 1) controller */
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
static int ehci_reset (struct ehci_hcd *ehci)
{
int retval;
@@ -352,36 +258,40 @@ static int ehci_reset (struct ehci_hcd *ehci)
return retval;
}
-/* idle the controller (from running) */
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
u32 temp;
-#ifdef DEBUG
if (ehci->rh_state != EHCI_RH_RUNNING)
- BUG ();
-#endif
+ return;
/* wait for any schedule enables/disables to take effect */
temp = (ehci->command << 10) & (STS_ASS | STS_PSS);
- if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_ASS | STS_PSS, temp, 16 * 125))
- return;
+ handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
/* then disable anything that's still active */
+ spin_lock_irq(&ehci->lock);
ehci->command &= ~(CMD_ASE | CMD_PSE);
ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ spin_unlock_irq(&ehci->lock);
/* hardware can take 16 microframes to turn off ... */
- handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_ASS | STS_PSS, 0, 16 * 125);
+ handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
}
/*-------------------------------------------------------------------------*/
static void end_unlink_async(struct ehci_hcd *ehci);
+static void unlink_empty_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
+#include "ehci-timer.c"
#include "ehci-hub.c"
#include "ehci-lpm.c"
#include "ehci-mem.c"
@@ -391,68 +301,6 @@ static void ehci_work(struct ehci_hcd *ehci);
/*-------------------------------------------------------------------------*/
-static void ehci_iaa_watchdog(unsigned long param)
-{
- struct ehci_hcd *ehci = (struct ehci_hcd *) param;
- unsigned long flags;
-
- spin_lock_irqsave (&ehci->lock, flags);
-
- /* Lost IAA irqs wedge things badly; seen first with a vt8235.
- * So we need this watchdog, but must protect it against both
- * (a) SMP races against real IAA firing and retriggering, and
- * (b) clean HC shutdown, when IAA watchdog was pending.
- */
- if (ehci->reclaim
- && !timer_pending(&ehci->iaa_watchdog)
- && ehci->rh_state == EHCI_RH_RUNNING) {
- u32 cmd, status;
-
- /* If we get here, IAA is *REALLY* late. It's barely
- * conceivable that the system is so busy that CMD_IAAD
- * is still legitimately set, so let's be sure it's
- * clear before we read STS_IAA. (The HC should clear
- * CMD_IAAD when it sets STS_IAA.)
- */
- cmd = ehci_readl(ehci, &ehci->regs->command);
-
- /* If IAA is set here it either legitimately triggered
- * before we cleared IAAD above (but _way_ late, so we'll
- * still count it as lost) ... or a silicon erratum:
- * - VIA seems to set IAA without triggering the IRQ;
- * - IAAD potentially cleared without setting IAA.
- */
- status = ehci_readl(ehci, &ehci->regs->status);
- if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT (ehci->stats.lost_iaa);
- ehci_writel(ehci, STS_IAA, &ehci->regs->status);
- }
-
- ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
- status, cmd);
- end_unlink_async(ehci);
- }
-
- spin_unlock_irqrestore(&ehci->lock, flags);
-}
-
-static void ehci_watchdog(unsigned long param)
-{
- struct ehci_hcd *ehci = (struct ehci_hcd *) param;
- unsigned long flags;
-
- spin_lock_irqsave(&ehci->lock, flags);
-
- /* stop async processing after it's idled a bit */
- if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
- start_unlink_async (ehci, ehci->async);
-
- /* ehci could run by timer, without IRQs ... */
- ehci_work (ehci);
-
- spin_unlock_irqrestore (&ehci->lock, flags);
-}
-
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
@@ -468,11 +316,14 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
/*
* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
- * Should be called with ehci->lock held.
+ * Must be called with interrupts enabled and the lock not held.
*/
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
ehci_halt(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ ehci->rh_state = EHCI_RH_HALTED;
ehci_turn_off_all_ports(ehci);
/* make BIOS/etc use companion controller during reboot */
@@ -480,6 +331,7 @@ static void ehci_silence_controller(struct ehci_hcd *ehci)
/* unblock posted writes */
ehci_readl(ehci, &ehci->regs->configured_flag);
+ spin_unlock_irq(&ehci->lock);
}
/* ehci_shutdown kick in for silicon on any bus (not just pci, etc).
@@ -490,12 +342,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- del_timer_sync(&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
-
spin_lock_irq(&ehci->lock);
- ehci_silence_controller(ehci);
+ ehci->shutdown = true;
+ ehci->rh_state = EHCI_RH_STOPPING;
+ ehci->enabled_hrtimer_events = 0;
spin_unlock_irq(&ehci->lock);
+
+ ehci_silence_controller(ehci);
+
+ hrtimer_cancel(&ehci->hrtimer);
}
static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
@@ -524,28 +379,33 @@ static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
*/
static void ehci_work (struct ehci_hcd *ehci)
{
- timer_action_done (ehci, TIMER_IO_WATCHDOG);
-
/* another CPU may drop ehci->lock during a schedule scan while
* it reports urb completions. this flag guards against bogus
* attempts at re-entrant schedule scanning.
*/
- if (ehci->scanning)
+ if (ehci->scanning) {
+ ehci->need_rescan = true;
return;
- ehci->scanning = 1;
- scan_async (ehci);
- if (ehci->next_uframe != -1)
- scan_periodic (ehci);
- ehci->scanning = 0;
+ }
+ ehci->scanning = true;
+
+ rescan:
+ ehci->need_rescan = false;
+ if (ehci->async_count)
+ scan_async(ehci);
+ if (ehci->intr_count > 0)
+ scan_intr(ehci);
+ if (ehci->isoc_count > 0)
+ scan_isoc(ehci);
+ if (ehci->need_rescan)
+ goto rescan;
+ ehci->scanning = false;
/* the IO watchdog guards against hardware or driver bugs that
* misplace IRQs, and should let us run completely without IRQs.
* such lossage has been observed on both VT6202 and VT8235.
*/
- if (ehci->rh_state == EHCI_RH_RUNNING &&
- (ehci->async->qh_next.ptr != NULL ||
- ehci->periodic_sched != 0))
- timer_action (ehci, TIMER_IO_WATCHDOG);
+ turn_on_io_watchdog(ehci);
}
/*
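
The rewritten ehci_work() above replaces the old single-flag early return with a need_rescan flag, so a completion handler that re-enters the scan while the lock is dropped simply requests another pass instead of being ignored. A userspace model of that guard, with an invented scan function and a forced re-entry to show the control flow:

#include <stdbool.h>
#include <stdio.h>

static bool scanning;
static bool need_rescan;
static int reentries;

static void scan_schedules(void);

static void work(void)
{
	if (scanning) {			/* re-entered from a completion */
		need_rescan = true;
		return;
	}
	scanning = true;
	do {
		need_rescan = false;
		scan_schedules();	/* may call work() again */
	} while (need_rescan);
	scanning = false;
}

static void scan_schedules(void)
{
	printf("scan pass\n");
	if (reentries++ == 0)
		work();			/* simulate a giveback re-entering */
}

int main(void)
{
	work();				/* two passes, no recursion */
	return 0;
}
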
@@ -558,24 +418,22 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_dbg (ehci, "stop\n");
/* no more interrupts ... */
- del_timer_sync (&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
spin_lock_irq(&ehci->lock);
- if (ehci->rh_state == EHCI_RH_RUNNING)
- ehci_quiesce (ehci);
+ ehci->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&ehci->lock);
+ ehci_quiesce(ehci);
ehci_silence_controller(ehci);
ehci_reset (ehci);
- spin_unlock_irq(&ehci->lock);
+ hrtimer_cancel(&ehci->hrtimer);
remove_sysfs_files(ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
spin_lock_irq (&ehci->lock);
- if (ehci->async)
- ehci_work (ehci);
+ end_free_itds(ehci);
spin_unlock_irq (&ehci->lock);
ehci_mem_cleanup (ehci);
@@ -583,8 +441,8 @@ static void ehci_stop (struct usb_hcd *hcd)
usb_amd_dev_put();
#ifdef EHCI_STATS
- ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
+ ehci_dbg(ehci, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
ehci->stats.lost_iaa);
ehci_dbg (ehci, "complete %ld unlink %ld\n",
ehci->stats.complete, ehci->stats.unlink);
@@ -609,13 +467,10 @@ static int ehci_init(struct usb_hcd *hcd)
* keep io watchdog by default, those good HCDs could turn off it later
*/
ehci->need_io_watchdog = 1;
- init_timer(&ehci->watchdog);
- ehci->watchdog.function = ehci_watchdog;
- ehci->watchdog.data = (unsigned long) ehci;
- init_timer(&ehci->iaa_watchdog);
- ehci->iaa_watchdog.function = ehci_iaa_watchdog;
- ehci->iaa_watchdog.data = (unsigned long) ehci;
+ hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ehci->hrtimer.function = ehci_hrtimer_func;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
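
The two kernel timers (watchdog and IAA watchdog) are replaced here by a single hrtimer driven by a bitmap of pending events; the helper added in ehci-timer.c (not shown in full in this section) only rearms the timer when a newly enabled event expires sooner than the current one. Below is a simplified userspace model of that bookkeeping; the event names, their ordering, and the printf "timer" are invented for illustration, while the real driver keeps per-event deadlines on a CLOCK_MONOTONIC hrtimer.

#include <stdio.h>

enum {
	EV_POLL_ASS,		/* shortest delay ... */
	EV_IAA_WATCHDOG,
	EV_IO_WATCHDOG,		/* ... longest delay */
	EV_NO_EVENT,
};

static unsigned int enabled_events;		/* bitmap of pending events */
static unsigned int next_event = EV_NO_EVENT;	/* soonest enabled event */

static void enable_event(unsigned int event)
{
	enabled_events |= 1u << event;

	/* Rearm the timer only if this event expires sooner. */
	if (event < next_event) {
		next_event = event;
		printf("rearm timer for event %u\n", event);
	}
}

static void timer_fired(void)
{
	unsigned int e = next_event;

	if (e < EV_NO_EVENT && (enabled_events & (1u << e))) {
		enabled_events &= ~(1u << e);
		printf("handle event %u\n", e);
	}

	/* Rearm for the soonest event still pending, if any. */
	for (next_event = 0; next_event < EV_NO_EVENT; next_event++)
		if (enabled_events & (1u << next_event))
			break;
	if (next_event < EV_NO_EVENT)
		printf("rearm timer for event %u\n", next_event);
}

int main(void)
{
	enable_event(EV_IO_WATCHDOG);	/* arms the timer */
	enable_event(EV_POLL_ASS);	/* sooner, so the timer is rearmed */
	timer_fired();			/* handles POLL_ASS, rearms for the rest */
	return 0;
}
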
@@ -630,6 +485,7 @@ static int ehci_init(struct usb_hcd *hcd)
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
ehci->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&ehci->intr_qh_list);
INIT_LIST_HEAD(&ehci->cached_itd_list);
INIT_LIST_HEAD(&ehci->cached_sitd_list);
@@ -651,10 +507,6 @@ static int ehci_init(struct usb_hcd *hcd)
else // N microframes cached
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
- ehci->reclaim = NULL;
- ehci->next_uframe = -1;
- ehci->clock_frame = -1;
-
/*
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
@@ -667,7 +519,7 @@ static int ehci_init(struct usb_hcd *hcd)
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
#if defined(CONFIG_PPC_PS3)
- hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
+ hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE);
#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
@@ -823,12 +675,12 @@ static int ehci_setup(struct usb_hcd *hcd)
ehci->sbrn = HCD_USB2;
- retval = ehci_halt(ehci);
+ /* data structure init */
+ retval = ehci_init(hcd);
if (retval)
return retval;
- /* data structure init */
- retval = ehci_init(hcd);
+ retval = ehci_halt(ehci);
if (retval)
return retval;
@@ -893,14 +745,28 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG)
+ ++ehci->next_hrtimer_event;
+
/* guard against (alleged) silicon errata */
if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
- if (ehci->reclaim) {
- COUNT(ehci->stats.reclaim);
+ if (ehci->async_iaa) {
+ COUNT(ehci->stats.iaa);
end_unlink_async(ehci);
} else
- ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
+ ehci_dbg(ehci, "IAA with nothing unlinked?\n");
}
/* remote wakeup [4.3.1] */
@@ -954,15 +820,19 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
ehci_err(ehci, "fatal error\n");
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
- ehci_halt(ehci);
dead:
- ehci_reset(ehci);
- ehci_writel(ehci, 0, &ehci->regs->configured_flag);
usb_hc_died(hcd);
- /* generic layer kills/unlinks all urbs, then
- * uses ehci_stop to clean up the rest
- */
- bh = 1;
+
+ /* Don't let the controller do anything more */
+ ehci->shutdown = true;
+ ehci->rh_state = EHCI_RH_STOPPING;
+ ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ ehci_handle_controller_death(ehci);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
}
if (bh)
@@ -1024,38 +894,6 @@ static int ehci_urb_enqueue (
}
}
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
-{
- /* failfast */
- if (ehci->rh_state != EHCI_RH_RUNNING && ehci->reclaim)
- end_unlink_async(ehci);
-
- /* If the QH isn't linked then there's nothing we can do
- * unless we were called during a giveback, in which case
- * qh_completions() has to deal with it.
- */
- if (qh->qh_state != QH_STATE_LINKED) {
- if (qh->qh_state == QH_STATE_COMPLETING)
- qh->needs_rescan = 1;
- return;
- }
-
- /* defer till later if busy */
- if (ehci->reclaim) {
- struct ehci_qh *last;
-
- for (last = ehci->reclaim;
- last->reclaim;
- last = last->reclaim)
- continue;
- qh->qh_state = QH_STATE_UNLINK_WAIT;
- last->reclaim = qh;
-
- /* start IAA cycle */
- } else
- start_unlink_async (ehci, qh);
-}
-
/* remove from hardware lists
* completions normally happen asynchronously
*/
@@ -1082,7 +920,7 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
switch (qh->qh_state) {
case QH_STATE_LINKED:
case QH_STATE_COMPLETING:
- unlink_async(ehci, qh);
+ start_unlink_async(ehci, qh);
break;
case QH_STATE_UNLINK:
case QH_STATE_UNLINK_WAIT:
@@ -1102,7 +940,7 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
switch (qh->qh_state) {
case QH_STATE_LINKED:
case QH_STATE_COMPLETING:
- intr_deschedule (ehci, qh);
+ start_unlink_intr(ehci, qh);
break;
case QH_STATE_IDLE:
qh_completions (ehci, qh);
@@ -1150,11 +988,17 @@ rescan:
* accelerate iso completions ... so spin a while.
*/
if (qh->hw == NULL) {
- ehci_vdbg (ehci, "iso delay\n");
- goto idle_timeout;
+ struct ehci_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ kfree(stream);
+ goto done;
}
- if (ehci->rh_state != EHCI_RH_RUNNING)
+ if (ehci->rh_state < EHCI_RH_RUNNING)
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
@@ -1167,7 +1011,7 @@ rescan:
* may already be unlinked.
*/
if (tmp)
- unlink_async(ehci, qh);
+ start_unlink_async(ehci, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
@@ -1179,7 +1023,7 @@ idle_timeout:
if (qh->clearing_tt)
goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
- qh_put (qh);
+ qh_destroy(ehci, qh);
break;
}
/* else FALL THROUGH */
@@ -1192,8 +1036,8 @@ idle_timeout:
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
}
+ done:
ep->hcpriv = NULL;
-done:
spin_unlock_irqrestore (&ehci->lock, flags);
}
@@ -1230,9 +1074,9 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
* re-linking will call qh_refresh().
*/
if (eptype == USB_ENDPOINT_XFER_BULK)
- unlink_async(ehci, qh);
+ start_unlink_async(ehci, qh);
else
- intr_deschedule(ehci, qh);
+ start_unlink_intr(ehci, qh);
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
@@ -1287,6 +1131,9 @@ static int __maybe_unused ehci_resume(struct usb_hcd *hcd, bool hibernated)
/* Mark hardware accessible again as we are back to full power by now */
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ if (ehci->shutdown)
+ return 0; /* Controller is dead */
+
/*
* If CF is still set and we aren't resuming from hibernation
* then we maintained suspend power.
@@ -1297,10 +1144,17 @@ static int __maybe_unused ehci_resume(struct usb_hcd *hcd, bool hibernated)
int mask = INTR_MASK;
ehci_prepare_ports_for_controller_resume(ehci);
+
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto skip;
+
if (!hcd->self.root_hub->do_remote_wakeup)
mask &= ~STS_PCD;
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
+ skip:
+ spin_unlock_irq(&ehci->lock);
return 0;
}
@@ -1312,21 +1166,20 @@ static int __maybe_unused ehci_resume(struct usb_hcd *hcd, bool hibernated)
(void) ehci_halt(ehci);
(void) ehci_reset(ehci);
- /* emptying the schedule aborts any urbs */
spin_lock_irq(&ehci->lock);
- if (ehci->reclaim)
- end_unlink_async(ehci);
- ehci_work(ehci);
- spin_unlock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto skip;
ehci_writel(ehci, ehci->command, &ehci->regs->command);
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+ ehci->rh_state = EHCI_RH_SUSPENDED;
+ spin_unlock_irq(&ehci->lock);
+
/* here we "know" root ports should always stay powered */
ehci_port_power(ehci, 1);
- ehci->rh_state = EHCI_RH_SUSPENDED;
return 1;
}
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index b3e2d66e95bb..c7880223738a 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -59,6 +59,7 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
/* Give the connections some time to appear */
msleep(20);
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
@@ -70,23 +71,30 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
clear_bit(port, &ehci->owned_ports);
else if (test_bit(port, &ehci->companion_ports))
ehci_writel(ehci, status & ~PORT_PE, reg);
- else
+ else {
+ spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, SetPortFeature,
USB_PORT_FEAT_RESET, port + 1,
NULL, 0);
+ spin_lock_irq(&ehci->lock);
+ }
}
}
+ spin_unlock_irq(&ehci->lock);
if (!ehci->owned_ports)
return;
msleep(90); /* Wait for resets to complete */
+ spin_lock_irq(&ehci->lock);
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
if (test_bit(port, &ehci->owned_ports)) {
+ spin_unlock_irq(&ehci->lock);
ehci_hub_control(hcd, GetPortStatus,
0, port + 1,
(char *) &buf, sizeof(buf));
+ spin_lock_irq(&ehci->lock);
/* The companion should now own the port,
* but if something went wrong the port must not
@@ -105,6 +113,7 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
}
ehci->owned_ports = 0;
+ spin_unlock_irq(&ehci->lock);
}
static int ehci_port_change(struct ehci_hcd *ehci)
@@ -133,7 +142,6 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
{
int port;
u32 temp;
- unsigned long flags;
/* If remote wakeup is enabled for the root hub but disabled
* for the controller, we must adjust all the port wakeup flags
@@ -143,7 +151,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
return;
- spin_lock_irqsave(&ehci->lock, flags);
+ spin_lock_irq(&ehci->lock);
/* clear phy low-power mode before changing wakeup flags */
if (ehci->has_hostpc) {
@@ -154,9 +162,9 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
temp = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
}
- spin_unlock_irqrestore(&ehci->lock, flags);
+ spin_unlock_irq(&ehci->lock);
msleep(5);
- spin_lock_irqsave(&ehci->lock, flags);
+ spin_lock_irq(&ehci->lock);
}
port = HCS_N_PORTS(ehci->hcs_params);
@@ -194,7 +202,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
if (!suspending && ehci_port_change(ehci))
usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
- spin_unlock_irqrestore(&ehci->lock, flags);
+ spin_unlock_irq(&ehci->lock);
}
static int ehci_bus_suspend (struct usb_hcd *hcd)
@@ -208,10 +216,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
- del_timer_sync(&ehci->watchdog);
- del_timer_sync(&ehci->iaa_watchdog);
+
+ /* stop the schedules */
+ ehci_quiesce(ehci);
spin_lock_irq (&ehci->lock);
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ goto done;
/* Once the controller is stopped, port resumes that are already
* in progress won't complete. Hence if remote wakeup is enabled
@@ -226,11 +237,6 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
}
- /* stop schedules, clean any completed work */
- if (ehci->rh_state == EHCI_RH_RUNNING)
- ehci_quiesce (ehci);
- ehci_work(ehci);
-
/* Unlike other USB host controller types, EHCI doesn't have
* any notion of "global" or bus-wide suspend. The driver has
* to manually suspend all the active unsuspended ports, and
@@ -292,6 +298,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
"succeeded" : "failed");
}
}
+ spin_unlock_irq(&ehci->lock);
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
@@ -299,10 +306,18 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
/* turn off now-idle HC */
ehci_halt (ehci);
+
+ spin_lock_irq(&ehci->lock);
+ if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
+ ehci_handle_controller_death(ehci);
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ goto done;
ehci->rh_state = EHCI_RH_SUSPENDED;
- if (ehci->reclaim)
- end_unlink_async(ehci);
+ end_unlink_async(ehci);
+ unlink_empty_async(ehci);
+ ehci_handle_intr_unlinks(ehci);
+ end_free_itds(ehci);
/* allow remote wakeup */
mask = INTR_MASK;
@@ -311,13 +326,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci_writel(ehci, mask, &ehci->regs->intr_enable);
ehci_readl(ehci, &ehci->regs->intr_enable);
+ done:
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
+ ehci->enabled_hrtimer_events = 0;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
spin_unlock_irq (&ehci->lock);
- /* ehci_work() may have re-enabled the watchdog timer, which we do not
- * want, and so we must delete any pending watchdog timer events.
- */
- del_timer_sync(&ehci->watchdog);
+ hrtimer_cancel(&ehci->hrtimer);
return 0;
}
@@ -334,10 +349,8 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (time_before (jiffies, ehci->next_statechange))
msleep(5);
spin_lock_irq (&ehci->lock);
- if (!HCD_HW_ACCESSIBLE(hcd)) {
- spin_unlock_irq(&ehci->lock);
- return -ESHUTDOWN;
- }
+ if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
+ goto shutdown;
if (unlikely(ehci->debug)) {
if (!dbgp_reset_prep())
@@ -376,6 +389,8 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
spin_unlock_irq(&ehci->lock);
msleep(8);
spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
/* clear phy low-power mode before resume */
if (ehci->bus_suspended && ehci->has_hostpc) {
@@ -393,6 +408,8 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
spin_unlock_irq(&ehci->lock);
msleep(5);
spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
}
/* manually resume the ports we suspended during bus_suspend() */
@@ -413,6 +430,8 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
spin_unlock_irq(&ehci->lock);
msleep(20);
spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
}
i = HCS_N_PORTS (ehci->hcs_params);
@@ -424,27 +443,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
ehci_vdbg (ehci, "resumed port %d\n", i + 1);
}
}
- (void) ehci_readl(ehci, &ehci->regs->command);
-
- /* maybe re-activate the schedule(s) */
- temp = 0;
- if (ehci->async->qh_next.qh)
- temp |= CMD_ASE;
- if (ehci->periodic_sched)
- temp |= CMD_PSE;
- if (temp) {
- ehci->command |= temp;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- }
ehci->next_statechange = jiffies + msecs_to_jiffies(5);
+ spin_unlock_irq(&ehci->lock);
+
+ ehci_handover_companion_ports(ehci);
/* Now we can safely re-enable irqs */
+ spin_lock_irq(&ehci->lock);
+ if (ehci->shutdown)
+ goto shutdown;
ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
+ (void) ehci_readl(ehci, &ehci->regs->intr_enable);
+ spin_unlock_irq(&ehci->lock);
- spin_unlock_irq (&ehci->lock);
- ehci_handover_companion_ports(ehci);
return 0;
+
+ shutdown:
+ spin_unlock_irq(&ehci->lock);
+ return -ESHUTDOWN;
}
#else
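
ehci_bus_resume() now rechecks ehci->shutdown every time it retakes the lock after sleeping, and bails out with -ESHUTDOWN if the controller died in the meantime. A compact pthread analogue of that drop-lock / sleep / recheck pattern; the flag name, the delay, and the return codes are illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool controller_dead;

static int bus_resume(void)
{
	pthread_mutex_lock(&lock);
	if (controller_dead)
		goto shutdown;

	/* Can't sleep with the lock held, so drop it around the delay. */
	pthread_mutex_unlock(&lock);
	usleep(8000);			/* hardware settling time */
	pthread_mutex_lock(&lock);

	/* Another context may have declared the controller dead meanwhile. */
	if (controller_dead)
		goto shutdown;

	printf("resume completed\n");
	pthread_mutex_unlock(&lock);
	return 0;

shutdown:
	pthread_mutex_unlock(&lock);
	return -1;			/* the driver returns -ESHUTDOWN */
}

int main(void)
{
	return bus_resume() ? 1 : 0;
}
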
@@ -1031,7 +1048,9 @@ static int ehci_hub_control (
case USB_PORT_FEAT_TEST:
if (!selector || selector > 5)
goto error;
+ spin_unlock_irqrestore(&ehci->lock, flags);
ehci_quiesce(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
/* Put all enabled ports into suspend */
while (ports--) {
@@ -1043,7 +1062,11 @@ static int ehci_hub_control (
ehci_writel(ehci, temp | PORT_SUSPEND,
sreg);
}
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
ehci_halt(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
+
temp = ehci_readl(ehci, status_reg);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 12f70c302b0b..ef2c3a1eca4b 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -64,10 +64,8 @@ static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd)
}
-static void qh_destroy(struct ehci_qh *qh)
+static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_hcd *ehci = qh->ehci;
-
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
@@ -92,8 +90,6 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
if (!qh->hw)
goto fail;
memset(qh->hw, 0, sizeof *qh->hw);
- qh->refcount = 1;
- qh->ehci = ehci;
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
@@ -113,20 +109,6 @@ fail:
return NULL;
}
-/* to share a qh (cpu threads, or hc) */
-static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
-{
- WARN_ON(!qh->refcount);
- qh->refcount++;
- return qh;
-}
-
-static inline void qh_put (struct ehci_qh *qh)
-{
- if (!--qh->refcount)
- qh_destroy(qh);
-}
-
/*-------------------------------------------------------------------------*/
/* The queue heads and transfer descriptors are managed from pools tied
@@ -136,13 +118,12 @@ static inline void qh_put (struct ehci_qh *qh)
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
- free_cached_lists(ehci);
if (ehci->async)
- qh_put (ehci->async);
+ qh_destroy(ehci, ehci->async);
ehci->async = NULL;
if (ehci->dummy)
- qh_put(ehci->dummy);
+ qh_destroy(ehci, ehci->dummy);
ehci->dummy = NULL;
/* DMA consistent memory and pools */
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 21e5f963f331..2cb7d370c4ef 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -104,10 +104,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_INTEL:
ehci->fs_i_thresh = 1;
- if (pdev->device == 0x27cc) {
- ehci->broken_periodic = 1;
- ehci_info(ehci, "using broken periodic workaround\n");
- }
if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
hcd->has_tt = 1;
break;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 4378bf72bbac..9bc39ca460c8 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -100,7 +100,7 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
- if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
+ if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) {
unsigned is_out, epnum;
is_out = qh->is_out;
@@ -265,7 +265,6 @@ __acquires(ehci->lock)
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
}
- qh_put (qh);
}
if (unlikely(urb->unlinked)) {
@@ -294,9 +293,6 @@ __acquires(ehci->lock)
spin_lock (&ehci->lock);
}
-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
@@ -326,7 +322,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
*
* It's a bug for qh->qh_state to be anything other than
* QH_STATE_IDLE, unless our caller is scan_async() or
- * scan_periodic().
+ * scan_intr().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
@@ -434,7 +430,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* stop scanning when we reach qtds the hc is using */
} else if (likely (!stopped
- && ehci->rh_state == EHCI_RH_RUNNING)) {
+ && ehci->rh_state >= EHCI_RH_RUNNING)) {
break;
/* scan the whole queue for unlinks whenever it stops */
@@ -442,7 +438,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
stopped = 1;
/* cancel everything if we halt, suspend, etc */
- if (ehci->rh_state != EHCI_RH_RUNNING)
+ if (ehci->rh_state < EHCI_RH_RUNNING)
last_status = -ESHUTDOWN;
/* this qtd is active; skip it unless a previous qtd
@@ -836,7 +832,6 @@ qh_make (
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
- qh->stamp = ehci->periodic_stamp;
if (urb->dev->speed == USB_SPEED_HIGH) {
qh->c_usecs = 0;
@@ -887,7 +882,7 @@ qh_make (
/* using TT? */
switch (urb->dev->speed) {
case USB_SPEED_LOW:
- info1 |= (1 << 12); /* EPS "low" */
+ info1 |= QH_LOW_SPEED;
/* FALL THROUGH */
case USB_SPEED_FULL:
@@ -895,8 +890,8 @@ qh_make (
if (type != PIPE_INTERRUPT)
info1 |= (EHCI_TUNE_RL_TT << 28);
if (type == PIPE_CONTROL) {
- info1 |= (1 << 27); /* for TT */
- info1 |= 1 << 14; /* toggle from qtd */
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
}
info1 |= maxp << 16;
@@ -921,11 +916,11 @@ qh_make (
break;
case USB_SPEED_HIGH: /* no TT involved */
- info1 |= (2 << 12); /* EPS "high" */
+ info1 |= QH_HIGH_SPEED;
if (type == PIPE_CONTROL) {
info1 |= (EHCI_TUNE_RL_HS << 28);
info1 |= 64 << 16; /* usb2 fixed maxpacket */
- info1 |= 1 << 14; /* toggle from qtd */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
@@ -946,7 +941,7 @@ qh_make (
ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
urb->dev->speed);
done:
- qh_put (qh);
+ qh_destroy(ehci, qh);
return NULL;
}
@@ -965,6 +960,31 @@ done:
/*-------------------------------------------------------------------------*/
+static void enable_async(struct ehci_hcd *ehci)
+{
+ if (ehci->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ ehci_poll_ASS(ehci);
+ turn_on_io_watchdog(ehci);
+}
+
+static void disable_async(struct ehci_hcd *ehci)
+{
+ if (--ehci->async_count)
+ return;
+
+ /* The async schedule and async_unlink list are supposed to be empty */
+ WARN_ON(ehci->async->qh_next.qh || ehci->async_unlink);
+
+ /* Don't turn off the schedule until ASS is 1 */
+ ehci_poll_ASS(ehci);
+}
+
/* move qh (and its qtds) onto async queue; maybe enable queue. */
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
@@ -978,24 +998,11 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
WARN_ON(qh->qh_state != QH_STATE_IDLE);
- /* (re)start the async schedule? */
- head = ehci->async;
- timer_action_done (ehci, TIMER_ASYNC_OFF);
- if (!head->qh_next.qh) {
- if (!(ehci->command & CMD_ASE)) {
- /* in case a clear of CMD_ASE didn't take yet */
- (void)handshake(ehci, &ehci->regs->status,
- STS_ASS, 0, 150);
- ehci->command |= CMD_ASE;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- /* posted write need not be known to HC yet ... */
- }
- }
-
/* clear halt and/or toggle; and maybe recover from silicon quirk */
qh_refresh(ehci, qh);
/* splice right after start */
+ head = ehci->async;
qh->qh_next = head->qh_next;
qh->hw->hw_next = head->hw->hw_next;
wmb ();
@@ -1003,10 +1010,11 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
head->qh_next.qh = qh;
head->hw->hw_next = dma;
- qh_get(qh);
qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
+
+ enable_async(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -1090,7 +1098,7 @@ static struct ehci_qh *qh_append_tds (
wmb ();
dummy->hw_token = token;
- urb->hcpriv = qh_get (qh);
+ urb->hcpriv = qh;
}
}
return qh;
@@ -1155,117 +1163,155 @@ submit_async (
/*-------------------------------------------------------------------------*/
-/* the async qh for the qtds being reclaimed are now unlinked from the HC */
-
-static void end_unlink_async (struct ehci_hcd *ehci)
+static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_qh *qh = ehci->reclaim;
- struct ehci_qh *next;
+ struct ehci_qh *prev;
- iaa_watchdog_done(ehci);
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK;
+ if (ehci->async_unlink)
+ ehci->async_unlink_last->unlink_next = qh;
+ else
+ ehci->async_unlink = qh;
+ ehci->async_unlink_last = qh;
- // qh->hw_next = cpu_to_hc32(qh->qh_dma);
- qh->qh_state = QH_STATE_IDLE;
- qh->qh_next.qh = NULL;
- qh_put (qh); // refcount from reclaim
+ /* Unlink it from the schedule */
+ prev = ehci->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
- /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
- next = qh->reclaim;
- ehci->reclaim = next;
- qh->reclaim = NULL;
+ prev->hw->hw_next = qh->hw->hw_next;
+ prev->qh_next = qh->qh_next;
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = qh->qh_next.qh;
+}
- qh_completions (ehci, qh);
+static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
+{
+ /*
+ * Do nothing if an IAA cycle is already running or
+ * if one will be started shortly.
+ */
+ if (ehci->async_iaa || ehci->async_unlinking)
+ return;
- if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
- qh_link_async (ehci, qh);
- } else {
- /* it's not free to turn the async schedule on/off; leave it
- * active but idle for a while once it empties.
- */
- if (ehci->rh_state == EHCI_RH_RUNNING
- && ehci->async->qh_next.qh == NULL)
- timer_action (ehci, TIMER_ASYNC_OFF);
- }
- qh_put(qh); /* refcount from async list */
+ /* Do all the waiting QHs at once */
+ ehci->async_iaa = ehci->async_unlink;
+ ehci->async_unlink = NULL;
- if (next) {
- ehci->reclaim = NULL;
- start_unlink_async (ehci, next);
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+ if (!nested) /* Avoid recursion */
+ end_unlink_async(ehci);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ ehci_writel(ehci, ehci->command | CMD_IAAD,
+ &ehci->regs->command);
+ ehci_readl(ehci, &ehci->regs->command);
+ ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
}
+}
+
+/* the async qh for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
if (ehci->has_synopsys_hc_bug)
ehci_writel(ehci, (u32) ehci->async->qh_dma,
&ehci->regs->async_next);
-}
-
-/* makes sure the async qh will become idle */
-/* caller must own ehci->lock */
-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
-{
- struct ehci_qh *prev;
+ /* Process the idle QHs */
+ restart:
+ ehci->async_unlinking = true;
+ while (ehci->async_iaa) {
+ qh = ehci->async_iaa;
+ ehci->async_iaa = qh->unlink_next;
+ qh->unlink_next = NULL;
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ qh_completions(ehci, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ ehci->rh_state == EHCI_RH_RUNNING)
+ qh_link_async(ehci, qh);
+ disable_async(ehci);
+ }
+ ehci->async_unlinking = false;
-#ifdef DEBUG
- assert_spin_locked(&ehci->lock);
- if (ehci->reclaim
- || (qh->qh_state != QH_STATE_LINKED
- && qh->qh_state != QH_STATE_UNLINK_WAIT)
- )
- BUG ();
-#endif
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (ehci->async_unlink) {
+ start_iaa_cycle(ehci, true);
+ if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
+ goto restart;
+ }
+}
- /* stop async schedule right now? */
- if (unlikely (qh == ehci->async)) {
- /* can't get here without STS_ASS set */
- if (ehci->rh_state != EHCI_RH_HALTED
- && !ehci->reclaim) {
- /* ... and CMD_IAAD clear */
- ehci->command &= ~CMD_ASE;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- wmb ();
- // handshake later, if we need to
- timer_action_done (ehci, TIMER_ASYNC_OFF);
+static void unlink_empty_async(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh, *next;
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+ bool check_unlinks_later = false;
+
+ /* Unlink all the async QHs that have been empty for a timer cycle */
+ next = ehci->async->qh_next.qh;
+ while (next) {
+ qh = next;
+ next = qh->qh_next.qh;
+
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ if (!stopped && qh->unlink_cycle ==
+ ehci->async_unlink_cycle)
+ check_unlinks_later = true;
+ else
+ single_unlink_async(ehci, qh);
}
- return;
}
- qh->qh_state = QH_STATE_UNLINK;
- ehci->reclaim = qh = qh_get (qh);
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (ehci->async_unlink)
+ start_iaa_cycle(ehci, false);
- prev = ehci->async;
- while (prev->qh_next.qh != qh)
- prev = prev->qh_next.qh;
+ /* QHs that haven't been empty for long enough will be handled later */
+ if (check_unlinks_later) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+ ++ehci->async_unlink_cycle;
+ }
+}
- prev->hw->hw_next = qh->hw->hw_next;
- prev->qh_next = qh->qh_next;
- if (ehci->qh_scan_next == qh)
- ehci->qh_scan_next = qh->qh_next.qh;
- wmb ();
+/* makes sure the async qh will become idle */
+/* caller must own ehci->lock */
- /* If the controller isn't running, we don't have to wait for it */
- if (unlikely(ehci->rh_state != EHCI_RH_RUNNING)) {
- /* if (unlikely (qh->reclaim != 0))
- * this will recurse, probably not much
- */
- end_unlink_async (ehci);
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ /*
+ * If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
return;
}
- ehci_writel(ehci, ehci->command | CMD_IAAD, &ehci->regs->command);
- (void)ehci_readl(ehci, &ehci->regs->command);
- iaa_watchdog_start(ehci);
+ single_unlink_async(ehci, qh);
+ start_iaa_cycle(ehci, false);
}
/*-------------------------------------------------------------------------*/
static void scan_async (struct ehci_hcd *ehci)
{
- bool stopped;
struct ehci_qh *qh;
- enum ehci_timer_action action = TIMER_IO_WATCHDOG;
-
- timer_action_done (ehci, TIMER_ASYNC_SHRINK);
- stopped = (ehci->rh_state != EHCI_RH_RUNNING);
+ bool check_unlinks_later = false;
ehci->qh_scan_next = ehci->async->qh_next.qh;
while (ehci->qh_scan_next) {
@@ -1281,33 +1327,30 @@ static void scan_async (struct ehci_hcd *ehci)
* drops the lock. That's why ehci->qh_scan_next
* always holds the next qh to scan; if the next qh
* gets unlinked then ehci->qh_scan_next is adjusted
- * in start_unlink_async().
+ * in single_unlink_async().
*/
- qh = qh_get(qh);
temp = qh_completions(ehci, qh);
- if (qh->needs_rescan)
- unlink_async(ehci, qh);
- qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
- qh_put(qh);
- if (temp != 0)
+ if (qh->needs_rescan) {
+ start_unlink_async(ehci, qh);
+ } else if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ qh->unlink_cycle = ehci->async_unlink_cycle;
+ check_unlinks_later = true;
+ } else if (temp != 0)
goto rescan;
}
+ }
- /* unlink idle entries, reducing DMA usage as well
- * as HCD schedule-scanning costs. delay for any qh
- * we just scanned, there's a not-unusual case that it
- * doesn't stay idle for long.
- * (plus, avoids some kind of re-activation race.)
- */
- if (list_empty(&qh->qtd_list)
- && qh->qh_state == QH_STATE_LINKED) {
- if (!ehci->reclaim && (stopped ||
- time_after_eq(jiffies, qh->unlink_time)))
- start_unlink_async(ehci, qh);
- else
- action = TIMER_ASYNC_SHRINK;
- }
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned, there's a not-unusual case that it
+ * doesn't stay idle for long.
+ */
+ if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING &&
+ !(ehci->enabled_hrtimer_events &
+ BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
+ ++ehci->async_unlink_cycle;
}
- if (action == TIMER_ASYNC_SHRINK)
- timer_action (ehci, TIMER_ASYNC_SHRINK);
}
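
The reclaim/qh_get/qh_put machinery in ehci-q.c is replaced by two singly linked lists: async_unlink collects QHs waiting for the next IAA cycle (async_unlink_last keeps appends O(1)), start_iaa_cycle() hands the whole batch over to async_iaa, and end_unlink_async() drains it when the IAA interrupt arrives. A userspace sketch of just that list handling, with a stripped-down struct qh and printf/free standing in for the real completion work:

#include <stdio.h>
#include <stdlib.h>

struct qh {
	int id;
	struct qh *unlink_next;
};

static struct qh *async_unlink, *async_unlink_last;
static struct qh *async_iaa;

/* Queue a QH for the next IAA cycle (mirrors single_unlink_async()). */
static void queue_unlink(struct qh *qh)
{
	qh->unlink_next = NULL;
	if (async_unlink)
		async_unlink_last->unlink_next = qh;
	else
		async_unlink = qh;
	async_unlink_last = qh;
}

/* Start an IAA cycle: take every waiting QH in one go. */
static void start_iaa_cycle(void)
{
	if (async_iaa)
		return;			/* a cycle is already in flight */
	async_iaa = async_unlink;
	async_unlink = NULL;
}

/* IAA interrupt arrived: the controller is done with these QHs. */
static void end_unlink_async(void)
{
	while (async_iaa) {
		struct qh *qh = async_iaa;

		async_iaa = qh->unlink_next;
		printf("qh %d is now idle\n", qh->id);
		free(qh);
	}
}

int main(void)
{
	for (int i = 1; i <= 3; i++) {
		struct qh *qh = calloc(1, sizeof(*qh));

		qh->id = i;
		queue_unlink(qh);
	}
	start_iaa_cycle();
	end_unlink_async();
	return 0;
}
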
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 33182c6d1ff9..7cf3da7babf0 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -479,70 +479,26 @@ static int tt_no_collision (
/*-------------------------------------------------------------------------*/
-static int enable_periodic (struct ehci_hcd *ehci)
+static void enable_periodic(struct ehci_hcd *ehci)
{
- int status;
-
- if (ehci->periodic_sched++)
- return 0;
-
- /* did clearing PSE did take effect yet?
- * takes effect only at frame boundaries...
- */
- status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_PSS, 0, 9 * 125);
- if (status) {
- usb_hc_died(ehci_to_hcd(ehci));
- return status;
- }
+ if (ehci->periodic_count++)
+ return;
- ehci->command |= CMD_PSE;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- /* posted write ... PSS happens later */
+ /* Stop waiting to turn off the periodic schedule */
+ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
- /* make sure ehci_work scans these */
- ehci->next_uframe = ehci_read_frame_index(ehci)
- % (ehci->periodic_size << 3);
- if (unlikely(ehci->broken_periodic))
- ehci->last_periodic_enable = ktime_get_real();
- return 0;
+ /* Don't start the schedule until PSS is 0 */
+ ehci_poll_PSS(ehci);
+ turn_on_io_watchdog(ehci);
}
-static int disable_periodic (struct ehci_hcd *ehci)
+static void disable_periodic(struct ehci_hcd *ehci)
{
- int status;
-
- if (--ehci->periodic_sched)
- return 0;
-
- if (unlikely(ehci->broken_periodic)) {
- /* delay experimentally determined */
- ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000);
- ktime_t now = ktime_get_real();
- s64 delay = ktime_us_delta(safe, now);
-
- if (unlikely(delay > 0))
- udelay(delay);
- }
-
- /* did setting PSE not take effect yet?
- * takes effect only at frame boundaries...
- */
- status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
- STS_PSS, STS_PSS, 9 * 125);
- if (status) {
- usb_hc_died(ehci_to_hcd(ehci));
- return status;
- }
-
- ehci->command &= ~CMD_PSE;
- ehci_writel(ehci, ehci->command, &ehci->regs->command);
- /* posted write ... */
-
- free_cached_lists(ehci);
+ if (--ehci->periodic_count)
+ return;
- ehci->next_uframe = -1;
- return 0;
+ /* Don't turn off the schedule until PSS is 1 */
+ ehci_poll_PSS(ehci);
}
/*-------------------------------------------------------------------------*/
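
enable_periodic()/disable_periodic() now keep a plain use count instead of handshaking on every call: the first user turns the periodic schedule on (after the PSS polling done by ehci_poll_PSS()) and the last user turns it off. A toy version of that counting discipline, with printf standing in for the CMD_PSE register writes and the PSS polling:

#include <stdio.h>

static int periodic_count;

static void enable_periodic(void)
{
	if (periodic_count++)
		return;			/* schedule already on */
	printf("wait for PSS == 0, then set PSE\n");
}

static void disable_periodic(void)
{
	if (--periodic_count)
		return;			/* still has users */
	printf("wait for PSS == 1, then clear PSE\n");
}

int main(void)
{
	enable_periodic();		/* first user turns the schedule on */
	enable_periodic();		/* no register access */
	disable_periodic();
	disable_periodic();		/* last user turns it off */
	return 0;
}
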
@@ -553,7 +509,7 @@ static int disable_periodic (struct ehci_hcd *ehci)
* this just links in a qh; caller guarantees uframe masks are set right.
* no FSTN support (yet; ehci 0.96+)
*/
-static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period = qh->period;
@@ -606,28 +562,38 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
- qh_get (qh);
/* update per-qh bandwidth for usbfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
+ list_add(&qh->intr_node, &ehci->intr_qh_list);
+
/* maybe enable periodic schedule processing */
- return enable_periodic(ehci);
+ ++ehci->intr_count;
+ enable_periodic(ehci);
}
-static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned i;
unsigned period;
- // FIXME:
- // IF this isn't high speed
- // and this qh is active in the current uframe
- // (and overlay token SplitXstate is false?)
- // THEN
- // qh->hw_info1 |= cpu_to_hc32(1 << 7 /* "ignore" */);
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
/* high bandwidth, or otherwise part of every microframe */
if ((period = qh->period) == 0)
@@ -650,18 +616,15 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
- qh_put (qh);
- /* maybe turn off periodic schedule */
- return disable_periodic(ehci);
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = list_entry(qh->intr_node.next,
+ struct ehci_qh, intr_node);
+ list_del(&qh->intr_node);
}
-static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- unsigned wait;
- struct ehci_qh_hw *hw = qh->hw;
- int rc;
-
/* If the QH isn't linked then there's nothing we can do
* unless we were called during a giveback, in which case
* qh_completions() has to deal with it.
@@ -674,28 +637,45 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh_unlink_periodic (ehci, qh);
- /* simple/paranoid: always delay, expecting the HC needs to read
- * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
- * expect khubd to clean up after any CSPLITs we won't issue.
- * active high speed queues may need bigger delays...
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
*/
- if (list_empty (&qh->qtd_list)
- || (cpu_to_hc32(ehci, QH_CMASK)
- & hw->hw_info2) != 0)
- wait = 2;
+ qh->unlink_cycle = ehci->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ if (ehci->intr_unlink)
+ ehci->intr_unlink_last->unlink_next = qh;
else
- wait = 55; /* worst case: 3 * 1024 */
+ ehci->intr_unlink = qh;
+ ehci->intr_unlink_last = qh;
+
+ if (ehci->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (ehci->rh_state < EHCI_RH_RUNNING)
+ ehci_handle_intr_unlinks(ehci);
+ else if (ehci->intr_unlink == qh) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+ ++ehci->intr_unlink_cycle;
+ }
+}
+
+static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ struct ehci_qh_hw *hw = qh->hw;
+ int rc;
- udelay (wait);
qh->qh_state = QH_STATE_IDLE;
hw->hw_next = EHCI_LIST_END(ehci);
- wmb ();
qh_completions(ehci, qh);
/* reschedule QH iff another request is queued */
- if (!list_empty(&qh->qtd_list) &&
- ehci->rh_state == EHCI_RH_RUNNING) {
+ if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
rc = qh_schedule(ehci, qh);
/* An error here likely indicates handshake failure
@@ -708,6 +688,10 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
qh, rc);
}
+
+ /* maybe turn off periodic schedule */
+ --ehci->intr_count;
+ disable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -884,7 +868,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
ehci_dbg (ehci, "reused qh %p schedule\n", qh);
/* stuff into the periodic schedule */
- status = qh_link_periodic (ehci, qh);
+ qh_link_periodic(ehci, qh);
done:
return status;
}
@@ -944,6 +928,35 @@ done_not_linked:
return status;
}
+static void scan_intr(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+
+ list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
+ intr_node) {
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why ehci->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then ehci->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(ehci, qh);
+ if (unlikely(qh->needs_rescan ||
+ (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED)))
+ start_unlink_intr(ehci, qh);
+ else if (temp != 0)
+ goto rescan;
+ }
+ }
+}
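The scan-cursor idea in scan_intr() can be hard to picture: qh_completions() drops ehci->lock during givebacks, so the QH after the current one may be unlinked mid-scan, and qh_unlink_periodic() compensates by advancing ehci->qh_scan_next. Below is a minimal, self-contained model of the same pattern on an ordinary doubly linked list; the names (node, scan_next, remove_node) are invented for the sketch and locking is omitted.

#include <stdio.h>

struct node {
        struct node *prev, *next;
        int id;
};

static struct node head = { &head, &head, -1 };  /* circular list head */
static struct node *scan_next;                   /* shared scan cursor */

static void insert_tail(struct node *n)
{
        n->prev = head.prev;
        n->next = &head;
        head.prev->next = n;
        head.prev = n;
}

/* Remover: if the scanner was about to visit n, push the cursor past it */
static void remove_node(struct node *n)
{
        if (scan_next == n)
                scan_next = n->next;
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void scan(void)
{
        struct node *n;

        for (n = head.next; n != &head; n = scan_next) {
                scan_next = n->next;      /* published before "dropping the lock" */
                printf("visiting %d\n", n->id);
                if (n->id == 2)
                        remove_node(n->next); /* simulate an unlink during a giveback */
        }
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 }, d = { .id = 4 };

        insert_tail(&a); insert_tail(&b); insert_tail(&c); insert_tail(&d);
        scan();                           /* visits 1, 2, 4: node 3 was unlinked */
        return 0;
}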
+
/*-------------------------------------------------------------------------*/
/* ehci_iso_stream ops work with both ITD and SITD */
@@ -958,7 +971,6 @@ iso_stream_alloc (gfp_t mem_flags)
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
stream->next_uframe = -1;
- stream->refcount = 1;
}
return stream;
}
@@ -1058,57 +1070,6 @@ iso_stream_init (
stream->maxp = maxp;
}
-static void
-iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
-{
- stream->refcount--;
-
- /* free whenever just a dev->ep reference remains.
- * not like a QH -- no persistent state (toggle, halt)
- */
- if (stream->refcount == 1) {
- // BUG_ON (!list_empty(&stream->td_list));
-
- while (!list_empty (&stream->free_list)) {
- struct list_head *entry;
-
- entry = stream->free_list.next;
- list_del (entry);
-
- /* knows about ITD vs SITD */
- if (stream->highspeed) {
- struct ehci_itd *itd;
-
- itd = list_entry (entry, struct ehci_itd,
- itd_list);
- dma_pool_free (ehci->itd_pool, itd,
- itd->itd_dma);
- } else {
- struct ehci_sitd *sitd;
-
- sitd = list_entry (entry, struct ehci_sitd,
- sitd_list);
- dma_pool_free (ehci->sitd_pool, sitd,
- sitd->sitd_dma);
- }
- }
-
- stream->bEndpointAddress &= 0x0f;
- if (stream->ep)
- stream->ep->hcpriv = NULL;
-
- kfree(stream);
- }
-}
-
-static inline struct ehci_iso_stream *
-iso_stream_get (struct ehci_iso_stream *stream)
-{
- if (likely (stream != NULL))
- stream->refcount++;
- return stream;
-}
-
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
@@ -1129,7 +1090,6 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
if (unlikely (stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
if (likely (stream != NULL)) {
- /* dev->ep owns the initial refcount */
ep->hcpriv = stream;
stream->ep = ep;
iso_stream_init(ehci, stream, urb->dev, urb->pipe,
@@ -1144,9 +1104,6 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
stream = NULL;
}
- /* caller guarantees an eventual matching iso_stream_put */
- stream = iso_stream_get (stream);
-
spin_unlock_irqrestore (&ehci->lock, flags);
return stream;
}
@@ -1254,17 +1211,19 @@ itd_urb_transaction (
spin_lock_irqsave (&ehci->lock, flags);
for (i = 0; i < num_itds; i++) {
- /* free_list.next might be cache-hot ... but maybe
- * the HC caches it too. avoid that issue for now.
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
*/
-
- /* prefer previously-allocated itds */
- if (likely (!list_empty(&stream->free_list))) {
- itd = list_entry (stream->free_list.prev,
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
struct ehci_itd, itd_list);
+ if (itd->frame == ehci->now_frame)
+ goto alloc_itd;
list_del (&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
+ alloc_itd:
spin_unlock_irqrestore (&ehci->lock, flags);
itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
&itd_dma);
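The frame == ehci->now_frame test above encodes a simple policy: a descriptor that was last used in the frame the controller is currently executing may still be cached by the hardware, so it is skipped and a fresh one is allocated instead (the goto alloc_itd path). A hedged standalone sketch of that policy, with invented names and plain calloc() standing in for the DMA pool:

#include <stdio.h>
#include <stdlib.h>

struct toy_itd {
        struct toy_itd *next;   /* singly linked free list for the sketch */
        unsigned frame;         /* frame this descriptor was last used in */
};

/*
 * Take a descriptor from the free list unless the head entry was used in
 * the frame the controller is executing right now; in that case fall back
 * to allocating a new one.
 */
static struct toy_itd *get_itd(struct toy_itd **free_list, unsigned now_frame)
{
        struct toy_itd *itd = *free_list;

        if (itd && itd->frame != now_frame) {
                *free_list = itd->next;
                return itd;                 /* safe to reuse */
        }
        return calloc(1, sizeof(*itd));     /* may still be "hot" in the HC: allocate */
}

int main(void)
{
        struct toy_itd old = { .next = NULL, .frame = 7 };
        struct toy_itd *free_list = &old;

        printf("reused? %s\n", get_itd(&free_list, 7) == &old ? "yes" : "no"); /* no */
        printf("reused? %s\n", get_itd(&free_list, 8) == &old ? "yes" : "no"); /* yes */
        return 0;
}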
@@ -1528,6 +1487,10 @@ iso_stream_schedule (
urb->start_frame = stream->next_uframe;
if (!stream->highspeed)
urb->start_frame >>= 3;
+
+ /* Make sure scan_isoc() sees these */
+ if (ehci->isoc_count == 0)
+ ehci->next_frame = now >> 3;
return 0;
fail:
@@ -1615,8 +1578,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
}
/* fit urb's itds into the selected schedule slot; activate as needed */
-static int
-itd_link_urb (
+static void itd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
@@ -1659,7 +1621,7 @@ itd_link_urb (
itd = list_entry (iso_sched->td_list.next,
struct ehci_itd, itd_list);
list_move_tail (&itd->itd_list, &stream->td_list);
- itd->stream = iso_stream_get (stream);
+ itd->stream = stream;
itd->urb = urb;
itd_init (ehci, stream, itd);
}
@@ -1686,8 +1648,8 @@ itd_link_urb (
iso_sched_free (stream, iso_sched);
urb->hcpriv = NULL;
- timer_action (ehci, TIMER_IO_WATCHDOG);
- return enable_periodic(ehci);
+ ++ehci->isoc_count;
+ enable_periodic(ehci);
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
@@ -1702,11 +1664,8 @@ itd_link_urb (
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
-static unsigned
-itd_complete (
- struct ehci_hcd *ehci,
- struct ehci_itd *itd
-) {
+static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
+{
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
@@ -1714,7 +1673,7 @@ itd_complete (
int urb_index = -1;
struct ehci_iso_stream *stream = itd->stream;
struct usb_device *dev;
- unsigned retval = false;
+ bool retval = false;
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
@@ -1767,9 +1726,11 @@ itd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- (void) disable_periodic(ehci);
- ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ --ehci->isoc_count;
+ disable_periodic(ehci);
+
+ ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_enable();
@@ -1783,28 +1744,20 @@ itd_complete (
dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
- iso_stream_put (ehci, stream);
done:
itd->urb = NULL;
- if (ehci->clock_frame != itd->frame || itd->index[7] != -1) {
- /* OK to recycle this ITD now. */
- itd->stream = NULL;
- list_move(&itd->itd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- } else {
- /* HW might remember this ITD, so we can't recycle it yet.
- * Move it to a safe place until a new frame starts.
- */
- list_move(&itd->itd_list, &ehci->cached_itd_list);
- if (stream->refcount == 2) {
- /* If iso_stream_put() were called here, stream
- * would be freed. Instead, just prevent reuse.
- */
- stream->ep->hcpriv = NULL;
- stream->ep = NULL;
- }
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &ehci->cached_itd_list);
+ start_free_itds(ehci);
}
+
return retval;
}
@@ -1861,12 +1814,9 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
else
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
-done_not_linked:
+ done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
-
-done:
- if (unlikely (status < 0))
- iso_stream_put (ehci, stream);
+ done:
return status;
}
@@ -1955,17 +1905,19 @@ sitd_urb_transaction (
* means we never need two sitds for full speed packets.
*/
- /* free_list.next might be cache-hot ... but maybe
- * the HC caches it too. avoid that issue for now.
+ /*
+ * Use siTDs from the free list, but not siTDs that may
+ * still be in use by the hardware.
*/
-
- /* prefer previously-allocated sitds */
- if (!list_empty(&stream->free_list)) {
- sitd = list_entry (stream->free_list.prev,
+ if (likely(!list_empty(&stream->free_list))) {
+ sitd = list_first_entry(&stream->free_list,
struct ehci_sitd, sitd_list);
+ if (sitd->frame == ehci->now_frame)
+ goto alloc_sitd;
list_del (&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else {
+ alloc_sitd:
spin_unlock_irqrestore (&ehci->lock, flags);
sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
&sitd_dma);
@@ -2034,8 +1986,7 @@ sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
-static int
-sitd_link_urb (
+static void sitd_link_urb(
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
@@ -2081,7 +2032,7 @@ sitd_link_urb (
sitd = list_entry (sched->td_list.next,
struct ehci_sitd, sitd_list);
list_move_tail (&sitd->sitd_list, &stream->td_list);
- sitd->stream = iso_stream_get (stream);
+ sitd->stream = stream;
sitd->urb = urb;
sitd_patch(ehci, stream, sitd, sched, packet);
@@ -2096,8 +2047,8 @@ sitd_link_urb (
iso_sched_free (stream, sched);
urb->hcpriv = NULL;
- timer_action (ehci, TIMER_IO_WATCHDOG);
- return enable_periodic(ehci);
+ ++ehci->isoc_count;
+ enable_periodic(ehci);
}
/*-------------------------------------------------------------------------*/
@@ -2115,18 +2066,15 @@ sitd_link_urb (
* (b) only this endpoint's completions submit URBs. It seems some silicon
* corrupts things if you reuse completed descriptors very quickly...
*/
-static unsigned
-sitd_complete (
- struct ehci_hcd *ehci,
- struct ehci_sitd *sitd
-) {
+static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+{
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
int urb_index = -1;
struct ehci_iso_stream *stream = sitd->stream;
struct usb_device *dev;
- unsigned retval = false;
+ bool retval = false;
urb_index = sitd->index;
desc = &urb->iso_frame_desc [urb_index];
@@ -2163,9 +2111,11 @@ sitd_complete (
ehci_urb_done(ehci, urb, 0);
retval = true;
urb = NULL;
- (void) disable_periodic(ehci);
- ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
+ --ehci->isoc_count;
+ disable_periodic(ehci);
+
+ ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
usb_amd_quirk_pll_enable();
@@ -2179,28 +2129,20 @@ sitd_complete (
dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
- iso_stream_put (ehci, stream);
done:
sitd->urb = NULL;
- if (ehci->clock_frame != sitd->frame) {
- /* OK to recycle this SITD now. */
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- } else {
- /* HW might remember this SITD, so we can't recycle it yet.
- * Move it to a safe place until a new frame starts.
- */
- list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
- if (stream->refcount == 2) {
- /* If iso_stream_put() were called here, stream
- * would be freed. Instead, just prevent reuse.
- */
- stream->ep->hcpriv = NULL;
- stream->ep = NULL;
- }
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&sitd->sitd_list, &stream->free_list);
+
+ /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &ehci->cached_sitd_list);
+ start_free_itds(ehci);
}
+
return retval;
}
@@ -2254,74 +2196,39 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
else
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
-done_not_linked:
+ done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
-
-done:
- if (status < 0)
- iso_stream_put (ehci, stream);
+ done:
return status;
}
/*-------------------------------------------------------------------------*/
-static void free_cached_lists(struct ehci_hcd *ehci)
+static void scan_isoc(struct ehci_hcd *ehci)
{
- struct ehci_itd *itd, *n;
- struct ehci_sitd *sitd, *sn;
-
- list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
- struct ehci_iso_stream *stream = itd->stream;
- itd->stream = NULL;
- list_move(&itd->itd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- }
-
- list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
- struct ehci_iso_stream *stream = sitd->stream;
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void
-scan_periodic (struct ehci_hcd *ehci)
-{
- unsigned now_uframe, frame, clock, clock_frame, mod;
- unsigned modified;
-
- mod = ehci->periodic_size << 3;
+ unsigned uf, now_frame, frame;
+ unsigned fmask = ehci->periodic_size - 1;
+ bool modified, live;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
- now_uframe = ehci->next_uframe;
- if (ehci->rh_state == EHCI_RH_RUNNING) {
- clock = ehci_read_frame_index(ehci);
- clock_frame = (clock >> 3) & (ehci->periodic_size - 1);
+ if (ehci->rh_state >= EHCI_RH_RUNNING) {
+ uf = ehci_read_frame_index(ehci);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
} else {
- clock = now_uframe + mod - 1;
- clock_frame = -1;
+ now_frame = (ehci->next_frame - 1) & fmask;
+ live = false;
}
- if (ehci->clock_frame != clock_frame) {
- free_cached_lists(ehci);
- ehci->clock_frame = clock_frame;
- }
- clock &= mod - 1;
- clock_frame = clock >> 3;
- ++ehci->periodic_stamp;
+ ehci->now_frame = now_frame;
+ frame = ehci->next_frame;
for (;;) {
union ehci_shadow q, *q_p;
__hc32 type, *hw_p;
- unsigned incomplete = false;
-
- frame = now_uframe >> 3;
restart:
/* scan each element in frame's queue for completions */
@@ -2329,48 +2236,17 @@ restart:
hw_p = &ehci->periodic [frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE(ehci, *hw_p);
- modified = 0;
+ modified = false;
while (q.ptr != NULL) {
- unsigned uf;
- union ehci_shadow temp;
- int live;
-
- live = (ehci->rh_state == EHCI_RH_RUNNING);
switch (hc32_to_cpu(ehci, type)) {
- case Q_TYPE_QH:
- /* handle any completions */
- temp.qh = qh_get (q.qh);
- type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
- q = q.qh->qh_next;
- if (temp.qh->stamp != ehci->periodic_stamp) {
- modified = qh_completions(ehci, temp.qh);
- if (!modified)
- temp.qh->stamp = ehci->periodic_stamp;
- if (unlikely(list_empty(&temp.qh->qtd_list) ||
- temp.qh->needs_rescan))
- intr_deschedule(ehci, temp.qh);
- }
- qh_put (temp.qh);
- break;
- case Q_TYPE_FSTN:
- /* for "save place" FSTNs, look at QH entries
- * in the previous frame for completions.
- */
- if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) {
- ehci_dbg(ehci,
- "ignoring completions from FSTNs\n");
- }
- type = Q_NEXT_TYPE(ehci, q.fstn->hw_next);
- q = q.fstn->fstn_next;
- break;
case Q_TYPE_ITD:
/* If this ITD is still active, leave it for
* later processing ... check the next entry.
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live) {
+ if (frame == now_frame && live) {
rmb();
for (uf = 0; uf < 8; uf++) {
if (q.itd->hw_transaction[uf] &
@@ -2378,7 +2254,6 @@ restart:
break;
}
if (uf < 8) {
- incomplete = true;
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
@@ -2410,14 +2285,12 @@ restart:
* No need to check for activity unless the
* frame is current.
*/
- if (((frame == clock_frame) ||
- (((frame + 1) & (ehci->periodic_size - 1))
- == clock_frame))
+ if (((frame == now_frame) ||
+ (((frame + 1) & fmask) == now_frame))
&& live
&& (q.sitd->hw_results &
SITD_ACTIVE(ehci))) {
- incomplete = true;
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci,
@@ -2445,58 +2318,23 @@ restart:
ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
type, frame, q.ptr);
// BUG ();
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
q.ptr = NULL;
+ break;
}
/* assume completion callbacks modify the queue */
- if (unlikely (modified)) {
- if (likely(ehci->periodic_sched > 0))
- goto restart;
- /* short-circuit this scan */
- now_uframe = clock;
- break;
- }
+ if (unlikely(modified && ehci->isoc_count > 0))
+ goto restart;
}
- /* If we can tell we caught up to the hardware, stop now.
- * We can't advance our scan without collecting the ISO
- * transfers that are still pending in this frame.
- */
- if (incomplete && ehci->rh_state == EHCI_RH_RUNNING) {
- ehci->next_uframe = now_uframe;
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
break;
- }
-
- // FIXME: this assumes we won't get lapped when
- // latencies climb; that should be rare, but...
- // detect it, and just go all the way around.
- // FLR might help detect this case, so long as latencies
- // don't exceed periodic_size msec (default 1.024 sec).
-
- // FIXME: likewise assumes HC doesn't halt mid-scan
-
- if (now_uframe == clock) {
- unsigned now;
-
- if (ehci->rh_state != EHCI_RH_RUNNING
- || ehci->periodic_sched == 0)
- break;
- ehci->next_uframe = now_uframe;
- now = ehci_read_frame_index(ehci) & (mod - 1);
- if (now_uframe == now)
- break;
-
- /* rescan the rest of this frame, then ... */
- clock = now;
- clock_frame = clock >> 3;
- if (ehci->clock_frame != clock_frame) {
- free_cached_lists(ehci);
- ehci->clock_frame = clock_frame;
- ++ehci->periodic_stamp;
- }
- } else {
- now_uframe++;
- now_uframe &= mod - 1;
- }
+ frame = (frame + 1) & fmask;
}
+ ehci->next_frame = now_frame;
}
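scan_isoc() now walks whole frames, from the frame after the last scan point up to and including the frame the controller is currently executing, wrapping modulo the schedule size. Because the off-by-one here is easy to get wrong, the following standalone model shows just the frame arithmetic; it assumes, as the driver does, that periodic_size is a power of two.

#include <stdio.h>

#define PERIODIC_SIZE 1024u                 /* must be a power of two */

/* Visit every frame from 'next' through 'now', inclusive, with wrap-around */
static void scan_frames(unsigned next, unsigned now)
{
        unsigned fmask = PERIODIC_SIZE - 1;
        unsigned frame = next;
        unsigned visited = 0;

        for (;;) {
                visited++;                  /* stand-in for scanning the frame */
                if (frame == now)
                        break;
                frame = (frame + 1) & fmask;
        }
        printf("scanned %u frame(s) from %u to %u\n", visited, next, now);
}

int main(void)
{
        scan_frames(1020, 3);   /* wraps: 1020..1023, 0..3 -> 8 frames */
        scan_frames(5, 5);      /* already caught up       -> 1 frame  */
        return 0;
}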
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index f7f3ce3275b8..65360945df78 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -445,12 +445,11 @@ static int controller_suspend(struct device *dev)
if (time_before(jiffies, ehci->next_statechange))
msleep(10);
- spin_lock_irqsave(&ehci->lock, flags);
+ ehci_halt(ehci);
+ spin_lock_irqsave(&ehci->lock, flags);
tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
- ehci_halt(ehci);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
spin_unlock_irqrestore(&ehci->lock, flags);
tegra_ehci_power_down(hcd);
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
new file mode 100644
index 000000000000..eb896a2c8f2e
--- /dev/null
+++ b/drivers/usb/host/ehci-timer.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2012 by Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/* This file is part of ehci-hcd.c */
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+ ehci->command |= bit;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ /* unblock posted write */
+ ehci_readl(ehci, &ehci->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
+{
+ ehci->command &= ~bit;
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+
+ /* unblock posted write */
+ ehci_readl(ehci, &ehci->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from ehci->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * ehci->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum ehci_hrtimer_event in ehci.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */
+ 6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &ehci->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ ehci->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < ehci->next_hrtimer_event) {
+ ehci->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
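The comment above is the heart of the new timer scheme: every event type is one bit in enabled_hrtimer_events, delays are sorted by event number, and only the lowest-numbered pending event decides when the single hrtimer is armed. The standalone model below reproduces that bookkeeping with plain integers; there is no real timer or locking, and the event names, delays, and fire times are made up for the illustration.

#include <stdbool.h>
#include <stdio.h>

enum { EV_POLL = 0, EV_UNLINK, EV_WATCHDOG, EV_NUM };   /* sorted by delay */
#define EV_NONE 99

static const unsigned long delay_ms[EV_NUM] = { 1, 2, 100 };

static unsigned enabled_events;                /* bitmask of pending events */
static unsigned next_event = EV_NONE;          /* lowest-numbered pending one */
static unsigned long deadline_ms[EV_NUM];

/* resched=false keeps a previously computed deadline, as in the driver */
static void enable_event(unsigned event, unsigned long now_ms, bool resched)
{
        if (resched)
                deadline_ms[event] = now_ms + delay_ms[event];
        enabled_events |= 1u << event;
        if (event < next_event)
                next_event = event;   /* this is when the single timer would be armed */
}

/* Timer expiry: handle what is due, put everything else back untouched */
static void timer_fires(unsigned long now_ms)
{
        unsigned events = enabled_events;
        unsigned e;

        enabled_events = 0;
        next_event = EV_NONE;
        for (e = 0; e < EV_NUM; e++) {
                if (!(events & (1u << e)))
                        continue;
                if (now_ms >= deadline_ms[e])
                        printf("event %u handled at %lu ms\n", e, now_ms);
                else
                        enable_event(e, now_ms, false);
        }
}

int main(void)
{
        enable_event(EV_WATCHDOG, 0, true);   /* due at 100 ms */
        enable_event(EV_POLL, 0, true);       /* due at 1 ms -> timer moves earlier */
        timer_fires(2);                       /* handles EV_POLL, re-queues the watchdog */
        timer_fires(120);                     /* handles EV_WATCHDOG */
        return 0;
}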
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void ehci_poll_ASS(struct ehci_hcd *ehci)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
+ actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (ehci->ASS_poll_count++ < 20) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+ return;
+ }
+ ehci_warn(ehci, "Waited too long for the async schedule status, giving up\n");
+ }
+ ehci->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (ehci->async_count > 0)
+ ehci_set_command_bit(ehci, CMD_ASE);
+
+ } else { /* Running */
+ if (ehci->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void ehci_disable_ASE(struct ehci_hcd *ehci)
+{
+ ehci_clear_command_bit(ehci, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void ehci_poll_PSS(struct ehci_hcd *ehci)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
+ actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (ehci->PSS_poll_count++ < 20) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+ return;
+ }
+ ehci_warn(ehci, "Waited too long for the periodic schedule status, giving up\n");
+ }
+ ehci->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (ehci->periodic_count > 0)
+ ehci_set_command_bit(ehci, CMD_PSE);
+
+ } else { /* Running */
+ if (ehci->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void ehci_disable_PSE(struct ehci_hcd *ehci)
+{
+ ehci_clear_command_bit(ehci, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void ehci_handle_controller_death(struct ehci_hcd *ehci)
+{
+ if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (ehci->died_poll_count++ < 5) {
+ /* Try again later */
+ ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ ehci->rh_state = EHCI_RH_HALTED;
+ ehci_writel(ehci, 0, &ehci->regs->configured_flag);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ ehci_work(ehci);
+ end_unlink_async(ehci);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
+{
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ ehci->intr_unlinking = true;
+ while (ehci->intr_unlink) {
+ struct ehci_qh *qh = ehci->intr_unlink;
+
+ if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
+ break;
+ ehci->intr_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+ end_unlink_intr(ehci, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (ehci->intr_unlink) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
+ ++ehci->intr_unlink_cycle;
+ }
+ ehci->intr_unlinking = false;
+}
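ehci_handle_intr_unlinks() cooperates with start_unlink_intr() (in ehci-sched.c) through a singly linked FIFO whose entries are stamped with the unlink cycle that was current when they were queued; on expiry, only entries from earlier cycles are considered safely out of the controller's reach. Here is a standalone model of that stamping scheme, with invented names and no real timer:

#include <stdio.h>

struct toy_qh {
        struct toy_qh *unlink_next;
        unsigned unlink_cycle;
        int id;
};

static struct toy_qh *unlink_head, *unlink_tail;
static unsigned unlink_cycle;

static void queue_unlink(struct toy_qh *qh)
{
        qh->unlink_cycle = unlink_cycle;
        qh->unlink_next = NULL;
        if (unlink_head)
                unlink_tail->unlink_next = qh;
        else
                unlink_head = qh;
        unlink_tail = qh;
        if (unlink_head == qh)
                ++unlink_cycle;   /* the timer was just started for this batch */
}

/* Timer expiry: finish only entries queued before the current cycle */
static void handle_unlinks(void)
{
        while (unlink_head && unlink_head->unlink_cycle != unlink_cycle) {
                struct toy_qh *qh = unlink_head;

                unlink_head = qh->unlink_next;
                printf("qh %d is now idle\n", qh->id);
        }
        if (unlink_head)
                ++unlink_cycle;   /* restart the timer for the remaining batch */
}

int main(void)
{
        struct toy_qh a = { .id = 1 }, b = { .id = 2 };

        queue_unlink(&a);     /* starts the (imaginary) timer */
        queue_unlink(&b);     /* joins the next batch */
        handle_unlinks();     /* first expiry: only qh 1 is old enough */
        handle_unlinks();     /* second expiry: qh 2 follows */
        return 0;
}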
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct ehci_hcd *ehci)
+{
+ if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
+ ehci->last_itd_to_free = list_entry(
+ ehci->cached_itd_list.prev,
+ struct ehci_itd, itd_list);
+ ehci->last_sitd_to_free = list_entry(
+ ehci->cached_sitd_list.prev,
+ struct ehci_sitd, sitd_list);
+ ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct ehci_hcd *ehci)
+{
+ struct ehci_itd *itd, *n;
+ struct ehci_sitd *sitd, *sn;
+
+ if (ehci->rh_state < EHCI_RH_RUNNING) {
+ ehci->last_itd_to_free = NULL;
+ ehci->last_sitd_to_free = NULL;
+ }
+
+ list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
+ if (itd == ehci->last_itd_to_free)
+ break;
+ }
+ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+ list_del(&sitd->sitd_list);
+ dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
+ if (sitd == ehci->last_sitd_to_free)
+ break;
+ }
+
+ if (!list_empty(&ehci->cached_itd_list) ||
+ !list_empty(&ehci->cached_sitd_list))
+ start_free_itds(ehci);
+}
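start_free_itds() and end_free_itds() apply the same deferral idea to descriptor memory: when the 2 ms timer is armed, the current tail of each cached list is remembered, and on expiry only entries up to and including that marker are freed, so anything added in the meantime gets a full timer period of its own. A compact standalone model, with invented names and ordinary malloc()/free() in place of the DMA pools:

#include <stdio.h>
#include <stdlib.h>

struct toy_itd {
        struct toy_itd *next;
        int id;
};

static struct toy_itd *cached_head, *cached_tail, *last_to_free;

static void cache_itd(int id)
{
        struct toy_itd *itd = calloc(1, sizeof(*itd));

        itd->id = id;
        if (cached_head)
                cached_tail->next = itd;
        else
                cached_head = itd;
        cached_tail = itd;
}

static void start_free(void)        /* timer armed: remember the boundary */
{
        last_to_free = cached_tail;
}

static void end_free(void)          /* timer expired: free up to the boundary */
{
        while (cached_head) {
                struct toy_itd *itd = cached_head;
                int done = (itd == last_to_free);

                cached_head = itd->next;
                printf("freeing itd %d\n", itd->id);
                free(itd);
                if (done)
                        break;
        }
        if (!cached_head)
                cached_tail = NULL;
}

int main(void)
{
        cache_itd(1); cache_itd(2);
        start_free();               /* boundary is itd 2 */
        cache_itd(3);               /* arrives while the timer is pending */
        end_free();                 /* frees 1 and 2; itd 3 waits for the next cycle */
        start_free(); end_free();   /* now itd 3 goes too */
        return 0;
}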
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
+{
+ if (ehci->rh_state != EHCI_RH_RUNNING)
+ return;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (ehci->async_iaa) {
+ u32 cmd, status;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = ehci_readl(ehci, &ehci->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = ehci_readl(ehci, &ehci->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(ehci->stats.lost_iaa);
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+ }
+
+ ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
+ status, cmd);
+ end_unlink_async(ehci);
+ }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct ehci_hcd *ehci)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (ehci->rh_state != EHCI_RH_RUNNING ||
+ (ehci->enabled_hrtimer_events &
+ BIT(EHCI_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
+ ehci->async_count + ehci->intr_count > 0))
+ ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum ehci_hrtimer_event in ehci.h.
+ */
+static void (*event_handlers[])(struct ehci_hcd *) = {
+ ehci_poll_ASS, /* EHCI_HRTIMER_POLL_ASS */
+ ehci_poll_PSS, /* EHCI_HRTIMER_POLL_PSS */
+ ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */
+ ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */
+ unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */
+ ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */
+ ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */
+ ehci_disable_ASE, /* EHCI_HRTIMER_DISABLE_ASYNC */
+ ehci_work, /* EHCI_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
+{
+ struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ events = ehci->enabled_hrtimer_events;
+ ehci->enabled_hrtimer_events = 0;
+ ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= ehci->hr_timeouts[e].tv64)
+ event_handlers[e](ehci);
+ else
+ ehci_enable_event(ehci, e, false);
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return HRTIMER_NORESTART;
+}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 85c3572155d1..da07d98f7d1d 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -42,7 +42,7 @@ struct ehci_stats {
/* irq usage */
unsigned long normal;
unsigned long error;
- unsigned long reclaim;
+ unsigned long iaa;
unsigned long lost_iaa;
/* termination of urbs from core */
@@ -51,7 +51,7 @@ struct ehci_stats {
};
/* ehci_hcd->lock guards shared data against other CPUs:
- * ehci_hcd: async, reclaim, periodic (and shadow), ...
+ * ehci_hcd: async, unlink, periodic (and shadow), ...
* usb_host_endpoint: hcpriv
* ehci_qh: qh_next, qtd_list
* ehci_qtd: qtd_list
@@ -62,13 +62,48 @@ struct ehci_stats {
#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */
+/*
+ * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
enum ehci_rh_state {
EHCI_RH_HALTED,
EHCI_RH_SUSPENDED,
- EHCI_RH_RUNNING
+ EHCI_RH_RUNNING,
+ EHCI_RH_STOPPING
};
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * ehci-timer.c) in parallel with this list.
+ */
+enum ehci_hrtimer_event {
+ EHCI_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ EHCI_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ EHCI_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ EHCI_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ EHCI_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define EHCI_HRTIMER_NO_EVENT 99
+
struct ehci_hcd { /* one per controller */
+ /* timing support */
+ enum ehci_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[EHCI_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
/* glue to PCI and HCD framework */
struct ehci_caps __iomem *caps;
struct ehci_regs __iomem *regs;
@@ -78,30 +113,48 @@ struct ehci_hcd { /* one per controller */
spinlock_t lock;
enum ehci_rh_state rh_state;
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct ehci_qh *qh_scan_next;
+
/* async schedule support */
struct ehci_qh *async;
struct ehci_qh *dummy; /* For AMD quirk use */
- struct ehci_qh *reclaim;
- struct ehci_qh *qh_scan_next;
- unsigned scanning : 1;
+ struct ehci_qh *async_unlink;
+ struct ehci_qh *async_unlink_last;
+ struct ehci_qh *async_iaa;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
unsigned periodic_size;
__hc32 *periodic; /* hw periodic table */
dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
unsigned i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
- int next_uframe; /* scan periodic, start here */
- unsigned periodic_sched; /* periodic activity count */
+ struct ehci_qh *intr_unlink;
+ struct ehci_qh *intr_unlink_last;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned next_frame; /* scan periodic, start here */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
unsigned uframe_periodic_max; /* max periodic time per uframe */
- /* list of itds & sitds completed while clock_frame was still active */
+ /* list of itds & sitds completed while now_frame was still active */
struct list_head cached_itd_list;
+ struct ehci_itd *last_itd_to_free;
struct list_head cached_sitd_list;
- unsigned clock_frame;
+ struct ehci_sitd *last_sitd_to_free;
/* per root hub port */
unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
@@ -126,10 +179,6 @@ struct ehci_hcd { /* one per controller */
struct dma_pool *itd_pool; /* itd per iso urb */
struct dma_pool *sitd_pool; /* sitd per split iso urb */
- struct timer_list iaa_watchdog;
- struct timer_list watchdog;
- unsigned long actions;
- unsigned periodic_stamp;
unsigned random_frame;
unsigned long next_statechange;
ktime_t last_periodic_enable;
@@ -143,7 +192,6 @@ struct ehci_hcd { /* one per controller */
unsigned big_endian_capbase:1;
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
- unsigned broken_periodic:1;
unsigned amd_pll_fix:1;
unsigned fs_i_thresh:1; /* Intel iso scheduling */
unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
@@ -187,34 +235,6 @@ static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
}
-
-static inline void
-iaa_watchdog_start(struct ehci_hcd *ehci)
-{
- WARN_ON(timer_pending(&ehci->iaa_watchdog));
- mod_timer(&ehci->iaa_watchdog,
- jiffies + msecs_to_jiffies(EHCI_IAA_MSECS));
-}
-
-static inline void iaa_watchdog_done(struct ehci_hcd *ehci)
-{
- del_timer(&ehci->iaa_watchdog);
-}
-
-enum ehci_timer_action {
- TIMER_IO_WATCHDOG,
- TIMER_ASYNC_SHRINK,
- TIMER_ASYNC_OFF,
-};
-
-static inline void
-timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
-{
- clear_bit (action, &ehci->actions);
-}
-
-static void free_cached_lists(struct ehci_hcd *ehci);
-
/*-------------------------------------------------------------------------*/
#include <linux/usb/ehci_def.h>
@@ -324,7 +344,13 @@ union ehci_shadow {
struct ehci_qh_hw {
__hc32 hw_next; /* see EHCI 3.6.1 */
__hc32 hw_info1; /* see EHCI 3.6.2 */
-#define QH_HEAD 0x00008000
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
__hc32 hw_info2; /* see EHCI 3.6.2 */
#define QH_SMASK 0x000000ff
#define QH_CMASK 0x0000ff00
@@ -342,32 +368,23 @@ struct ehci_qh_hw {
} __attribute__ ((aligned(32)));
struct ehci_qh {
- struct ehci_qh_hw *hw;
+ struct ehci_qh_hw *hw; /* Must come first */
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
union ehci_shadow qh_next; /* ptr to qh; or periodic */
struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
struct ehci_qtd *dummy;
- struct ehci_qh *reclaim; /* next to reclaim */
-
- struct ehci_hcd *ehci;
- unsigned long unlink_time;
+ struct ehci_qh *unlink_next; /* next on unlink list */
- /*
- * Do NOT use atomic operations for QH refcounting. On some CPUs
- * (PPC7448 for example), atomic operations cannot be performed on
- * memory that is cache-inhibited (i.e. being used for DMA).
- * Spinlocks are used to protect all QH fields.
- */
- u32 refcount;
- unsigned stamp;
+ unsigned unlink_cycle;
u8 needs_rescan; /* Dequeue during giveback */
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
#define QH_STATE_IDLE 3 /* HC doesn't see this */
-#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
u8 xacterrs; /* XactErr retry counter */
@@ -417,7 +434,6 @@ struct ehci_iso_stream {
/* first field matches ehci_qh, but is NULL */
struct ehci_qh_hw *hw;
- u32 refcount;
u8 bEndpointAddress;
u8 highspeed;
struct list_head td_list; /* queued itds/sitds */
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 6d21030e2b7b..74bfc868b7ad 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
}
}
+/* Updates Link Status for a SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+ u32 pls = status_reg & PORT_PLS_MASK;
+
+ /* Resume is an xHCI-internal state.
+ * Do not report it to the USB core.
+ */
+ if (pls == XDEV_RESUME)
+ return;
+
+ /* When the CAS bit is set, a warm reset
+ * should be performed on the port.
+ */
+ if (status_reg & PORT_CAS) {
+ /* The CAS bit can be set while the port is
+ * in any link state.
+ * Only root hubs have the CAS bit, so we
+ * pretend to be in Compliance Mode
+ * unless we're already in Compliance Mode
+ * or the Inactive state.
+ */
+ if (pls != USB_SS_PORT_LS_COMP_MOD &&
+ pls != USB_SS_PORT_LS_SS_INACTIVE) {
+ pls = USB_SS_PORT_LS_COMP_MOD;
+ }
+ /* Also return the connection bit -
+ * the hub state machine resets the port
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
+ }
+ /* update status field */
+ *status |= pls;
+}
+
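The effect of xhci_hub_report_link_state() is easiest to see with concrete numbers. The sketch below re-implements the same decision logic against mocked-up register fields so it can run in isolation; apart from PORT_CAS being bit 24 (added above), the TOY_* constants are simplified stand-ins, not the real xHCI or wPortStatus encodings.

#include <stdint.h>
#include <stdio.h>

/* Toy constants for illustration; the real values come from xhci.h / ch11.h */
#define TOY_PLS_MASK        0x1e0u
#define TOY_PLS_RESUME      0x1e0u
#define TOY_PLS_COMP_MOD    0x0a0u
#define TOY_PLS_SS_INACTIVE 0x0c0u
#define TOY_PORT_CAS        (1u << 24)   /* matches PORT_CAS added above */
#define TOY_STAT_CONNECTION 0x001u

/* Same decision structure as xhci_hub_report_link_state(), on toy fields */
static void report_link_state(uint32_t *status, uint32_t status_reg)
{
        uint32_t pls = status_reg & TOY_PLS_MASK;

        if (pls == TOY_PLS_RESUME)            /* internal state: hide it from core */
                return;

        if (status_reg & TOY_PORT_CAS) {
                if (pls != TOY_PLS_COMP_MOD && pls != TOY_PLS_SS_INACTIVE)
                        pls = TOY_PLS_COMP_MOD;  /* force a warm-reset-worthy state */
                pls |= TOY_STAT_CONNECTION;      /* so the hub state machine acts */
        }
        *status |= pls;
}

int main(void)
{
        uint32_t status = 0;

        /* CAS set while the link reports U3 (0x060 in the toy encoding) */
        report_link_state(&status, TOY_PORT_CAS | 0x060);
        printf("reported status: 0x%03x\n", (unsigned)status); /* 0x0a1 */
        return 0;
}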
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -612,13 +648,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
else
status |= USB_PORT_STAT_POWER;
}
- /* Port Link State */
+ /* Update Port Link State for SuperSpeed ports */
if (hcd->speed == HCD_USB3) {
- /* resume state is a xHCI internal state.
- * Do not report it to usb core.
- */
- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
- status |= (temp & PORT_PLS_MASK);
+ xhci_hub_report_link_state(&status, temp);
}
if (bus_state->port_c_suspend & (1 << wIndex))
status |= 1 << USB_PORT_FEAT_C_SUSPEND;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 23b4aefd1036..8275645889da 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
+ /* If we get two back-to-back stalls, and the first stalled transfer
+ * ends just before a link TRB, the dequeue pointer will be left on
+ * the link TRB by the code in the while loop. So we have to update
+ * the dequeue pointer one segment further, or we'll jump off
+ * the segment into la-la-land.
+ */
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index de3d6e3e57be..55c0785810c9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -341,7 +341,11 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
/* wake on connect (enable) */
#define PORT_WKCONN_E (1 << 25)
/* wake on disconnect (enable) */
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 81423f7361db..d47eb06fe463 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
metro_priv->throttled = 0;
spin_unlock_irqrestore(&metro_priv->lock, flags);
- /*
- * Force low_latency on so that our tty_push actually forces the data
- * through, otherwise it is scheduled, and with high data rates (like
- * with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
/* Clear the urb pipe. */
usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a0382b24866e..2b0c88da7828 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -497,6 +497,15 @@ static void option_instat_callback(struct urb *urb);
/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM 0x7101
+#define MEDIATEK_PRODUCT_7208_2COM 0x7102
+#define MEDIATEK_PRODUCT_FP_1COM 0x0003
+#define MEDIATEK_PRODUCT_FP_2COM 0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
/* Cellient products */
#define CELLIENT_VENDOR_ID 0x2692
@@ -554,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};
+static const struct option_blacklist_info net_intf2_blacklist = {
+ .reserved = BIT(2),
+};
+
static const struct option_blacklist_info net_intf3_blacklist = {
.reserved = BIT(3),
};
@@ -1099,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1240,6 +1255,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 11418da9bc09..a3d54366afcc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -236,6 +236,11 @@ static int slave_configure(struct scsi_device *sdev)
US_FL_SCM_MULT_TARG)) &&
us->protocol == USB_PR_BULK)
us->use_last_sector_hacks = 1;
+
+ /* Check if write cache default on flag is set or not */
+ if (us->fflags & US_FL_WRITE_CACHE)
+ sdev->wce_default_on = 1;
+
} else {
/* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1719886bb9be..62a31bea0634 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1267,6 +1267,12 @@ UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
0 ),
+/* Reported by Namjae Jeon <namjae.jeon@samsung.com> */
+UNUSUAL_DEV(0x0bc2, 0x2300, 0x0000, 0x9999,
+ "Seagate",
+ "Portable HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),
+
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",
@@ -1468,6 +1474,12 @@ UNUSUAL_DEV( 0x1058, 0x0704, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE),
+/* Reported by Namjae Jeon <namjae.jeon@samsung.com> */
+UNUSUAL_DEV(0x1058, 0x070a, 0x0000, 0x9999,
+ "Western Digital",
+ "My Passport HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),
+
/* Reported by Fabio Venturi <f.venturi@tdnet.it>
* The device reports a vendor-specific bDeviceClass.
*/
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index e23c30ab66da..d012fe4329e7 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -473,7 +473,7 @@ static void adjust_quirks(struct us_data *us)
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
- US_FL_INITIAL_READ10);
+ US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE);
p = quirks;
while (*p) {
@@ -529,6 +529,9 @@ static void adjust_quirks(struct us_data *us)
case 'o':
f |= US_FL_CAPACITY_OK;
break;
+ case 'p':
+ f |= US_FL_WRITE_CACHE;
+ break;
case 'r':
f |= US_FL_IGNORE_RESIDUE;
break;