Diffstat (limited to 'drivers/thunderbolt/tb.c'):
 -rw-r--r--  drivers/thunderbolt/tb.c | 240
 1 file changed, 137 insertions(+), 103 deletions(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 24b6d30c3c86..1b02ca0b6129 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -7,11 +7,24 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"
+/**
+ * struct tb_cm - Simple Thunderbolt connection manager
+ * @tunnel_list: List of active tunnels
+ * @hotplug_active: tb_handle_hotplug will stop progressing plug
+ * events and exit if this is not set (it needs to
+ * acquire the lock one more time). Used to drain wq
+ * after cfg has been paused.
+ */
+struct tb_cm {
+ struct list_head tunnel_list;
+ bool hotplug_active;
+};
/* enumeration & hot plug handling */
@@ -49,9 +62,23 @@ static void tb_scan_port(struct tb_port *port)
tb_port_WARN(port, "port already has a remote!\n");
return;
}
- sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
+ sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
+ tb_downstream_route(port));
if (!sw)
return;
+
+ if (tb_switch_configure(sw)) {
+ tb_switch_put(sw);
+ return;
+ }
+
+ sw->authorized = true;
+
+ if (tb_switch_add(sw)) {
+ tb_switch_put(sw);
+ return;
+ }
+
port->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = port;
tb_scan_switch(sw);
@@ -62,12 +89,14 @@ static void tb_scan_port(struct tb_port *port)
*/
static void tb_free_invalid_tunnels(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel;
struct tb_pci_tunnel *n;
- list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
- {
+
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
if (tb_pci_is_invalid(tunnel)) {
tb_pci_deactivate(tunnel);
+ list_del(&tunnel->list);
tb_pci_free(tunnel);
}
}
@@ -86,7 +115,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
if (!port->remote)
continue;
if (port->remote->sw->is_unplugged) {
- tb_switch_free(port->remote->sw);
+ tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else {
tb_free_unplugged_children(port->remote->sw);
@@ -121,8 +150,8 @@ static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
continue;
if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
continue;
- cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
- if (cap <= 0)
+ cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
+ if (cap < 0)
continue;
res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
if (res < 0)
@@ -149,6 +178,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
struct tb_port *up_port;
struct tb_port *down_port;
struct tb_pci_tunnel *tunnel;
+ struct tb_cm *tcm = tb_priv(tb);
+
/* scan for pcie devices at depth 1*/
for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
if (tb_is_upstream_port(&tb->root_switch->ports[i]))
@@ -165,8 +196,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
}
/* check whether port is already activated */
- cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
- if (cap <= 0)
+ cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
+ if (cap < 0)
continue;
if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
continue;
@@ -195,6 +226,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
tb_pci_free(tunnel);
}
+ list_add(&tunnel->list, &tcm->tunnel_list);
}
}
@@ -217,10 +249,11 @@ static void tb_handle_hotplug(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
struct tb *tb = ev->tb;
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
struct tb_port *port;
mutex_lock(&tb->lock);
- if (!tb->hotplug_active)
+ if (!tcm->hotplug_active)
goto out; /* during init, suspend or shutdown */
sw = get_switch_at_route(tb->root_switch, ev->route);
@@ -248,7 +281,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_port_info(port, "unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
- tb_switch_free(port->remote->sw);
+ tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else {
tb_port_info(port,
@@ -281,137 +314,108 @@ out:
*
* Delegates to tb_handle_hotplug.
*/
-static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
- bool unplug)
+static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
{
- struct tb *tb = data;
- struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+ const struct cfg_event_pkg *pkg = buf;
+ struct tb_hotplug_event *ev;
+ u64 route;
+
+ if (type != TB_CFG_PKG_EVENT) {
+ tb_warn(tb, "unexpected event %#x, ignoring\n", type);
+ return;
+ }
+
+ route = tb_cfg_get_route(&pkg->header);
+
+ if (tb_cfg_error(tb->ctl, route, pkg->port,
+ TB_CFG_ERROR_ACK_PLUG_EVENT)) {
+ tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
+ pkg->port);
+ }
+
+ ev = kmalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return;
INIT_WORK(&ev->work, tb_handle_hotplug);
ev->tb = tb;
ev->route = route;
- ev->port = port;
- ev->unplug = unplug;
+ ev->port = pkg->port;
+ ev->unplug = pkg->unplug;
queue_work(tb->wq, &ev->work);
}
-/**
- * thunderbolt_shutdown_and_free() - shutdown everything
- *
- * Free all switches and the config channel.
- *
- * Used in the error path of thunderbolt_alloc_and_start.
- */
-void thunderbolt_shutdown_and_free(struct tb *tb)
+static void tb_stop(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel;
struct tb_pci_tunnel *n;
- mutex_lock(&tb->lock);
-
/* tunnels are only present after everything has been initialized */
- list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
tb_pci_deactivate(tunnel);
tb_pci_free(tunnel);
}
-
- if (tb->root_switch)
- tb_switch_free(tb->root_switch);
- tb->root_switch = NULL;
-
- if (tb->ctl) {
- tb_ctl_stop(tb->ctl);
- tb_ctl_free(tb->ctl);
- }
- tb->ctl = NULL;
- tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-
- /* allow tb_handle_hotplug to acquire the lock */
- mutex_unlock(&tb->lock);
- if (tb->wq) {
- flush_workqueue(tb->wq);
- destroy_workqueue(tb->wq);
- tb->wq = NULL;
- }
- mutex_destroy(&tb->lock);
- kfree(tb);
+ tb_switch_remove(tb->root_switch);
+ tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
-/**
- * thunderbolt_alloc_and_start() - setup the thunderbolt bus
- *
- * Allocates a tb_cfg control channel, initializes the root switch, enables
- * plug events and activates pci devices.
- *
- * Return: Returns NULL on error.
- */
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
+static int tb_start(struct tb *tb)
{
- struct tb *tb;
-
- BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
- BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
- BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+ struct tb_cm *tcm = tb_priv(tb);
+ int ret;
- tb = kzalloc(sizeof(*tb), GFP_KERNEL);
- if (!tb)
- return NULL;
-
- tb->nhi = nhi;
- mutex_init(&tb->lock);
- mutex_lock(&tb->lock);
- INIT_LIST_HEAD(&tb->tunnel_list);
-
- tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
- if (!tb->wq)
- goto err_locked;
+ tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+ if (!tb->root_switch)
+ return -ENOMEM;
- tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
- if (!tb->ctl)
- goto err_locked;
/*
- * tb_schedule_hotplug_handler may be called as soon as the config
- * channel is started. Thats why we have to hold the lock here.
+ * ICM firmware upgrade needs running firmware and in native
+ * mode that is not available so disable firmware upgrade of the
+ * root switch.
*/
- tb_ctl_start(tb->ctl);
+ tb->root_switch->no_nvm_upgrade = true;
- tb->root_switch = tb_switch_alloc(tb, 0);
- if (!tb->root_switch)
- goto err_locked;
+ ret = tb_switch_configure(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ return ret;
+ }
+
+ /* Announce the switch to the world */
+ ret = tb_switch_add(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ return ret;
+ }
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
tb_activate_pcie_devices(tb);
/* Allow tb_handle_hotplug to progress events */
- tb->hotplug_active = true;
- mutex_unlock(&tb->lock);
- return tb;
-
-err_locked:
- mutex_unlock(&tb->lock);
- thunderbolt_shutdown_and_free(tb);
- return NULL;
+ tcm->hotplug_active = true;
+ return 0;
}
-void thunderbolt_suspend(struct tb *tb)
+static int tb_suspend_noirq(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+
tb_info(tb, "suspending...\n");
- mutex_lock(&tb->lock);
tb_switch_suspend(tb->root_switch);
- tb_ctl_stop(tb->ctl);
- tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
- mutex_unlock(&tb->lock);
+ tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_info(tb, "suspend finished\n");
+
+ return 0;
}
-void thunderbolt_resume(struct tb *tb)
+static int tb_resume_noirq(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel, *n;
+
tb_info(tb, "resuming...\n");
- mutex_lock(&tb->lock);
- tb_ctl_start(tb->ctl);
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb, 0);
@@ -419,9 +423,9 @@ void thunderbolt_resume(struct tb *tb)
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
- list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_pci_restart(tunnel);
- if (!list_empty(&tb->tunnel_list)) {
+ if (!list_empty(&tcm->tunnel_list)) {
/*
* the pcie links need some time to get going.
* 100ms works for me...
@@ -430,7 +434,37 @@ void thunderbolt_resume(struct tb *tb)
msleep(100);
}
/* Allow tb_handle_hotplug to progress events */
- tb->hotplug_active = true;
- mutex_unlock(&tb->lock);
+ tcm->hotplug_active = true;
tb_info(tb, "resume finished\n");
+
+ return 0;
+}
+
+static const struct tb_cm_ops tb_cm_ops = {
+ .start = tb_start,
+ .stop = tb_stop,
+ .suspend_noirq = tb_suspend_noirq,
+ .resume_noirq = tb_resume_noirq,
+ .handle_event = tb_handle_event,
+};
+
+struct tb *tb_probe(struct tb_nhi *nhi)
+{
+ struct tb_cm *tcm;
+ struct tb *tb;
+
+ if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ return NULL;
+
+ tb = tb_domain_alloc(nhi, sizeof(*tcm));
+ if (!tb)
+ return NULL;
+
+ tb->security_level = TB_SECURITY_NONE;
+ tb->cm_ops = &tb_cm_ops;
+
+ tcm = tb_priv(tb);
+ INIT_LIST_HEAD(&tcm->tunnel_list);
+
+ return tb;
}
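
Note on how the ops above are exercised: tb_probe() only allocates the domain and registers tb_cm_ops; the start/stop/suspend/resume entry points are expected to be invoked by the generic domain code on behalf of the NHI driver. The sketch below illustrates that assumed call flow. tb_probe(), tb_cm_ops, tb_start() and tb_handle_event() come from the diff above; tb_domain_add() and tb_domain_put() are assumed helpers provided elsewhere in the thunderbolt core and are not part of this patch.

/*
 * Illustrative sketch, not part of the patch: shows how an NHI probe
 * path is assumed to hand the domain allocated by tb_probe() over to
 * the domain core, which in turn calls the tb_cm_ops registered above.
 */
static int example_nhi_probe(struct tb_nhi *nhi)
{
	struct tb *tb;
	int ret;

	tb = tb_probe(nhi);		/* NULL on non-Apple boards here */
	if (!tb)
		return -ENODEV;

	/*
	 * Assumed: tb_domain_add() starts the control channel and then
	 * invokes tb->cm_ops->start(), i.e. tb_start() above, which sets
	 * up the root switch and enables hotplug handling.
	 */
	ret = tb_domain_add(tb);
	if (ret) {
		tb_domain_put(tb);	/* assumed: drops the domain reference */
		return ret;
	}

	return 0;
}

Once started, plug events received on the control channel are expected to be routed to tb->cm_ops->handle_event(), i.e. tb_handle_event() above, which acks the plug event and queues tb_handle_hotplug() on the domain workqueue.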