author     Jiri Pirko        2011-11-16 12:09:08 +0100
committer  David S. Miller   2011-11-17 00:16:37 +0100
commit     61dc3461b9549bc10a2f16d254250680cadafcce (patch)
tree       9072edbb001647be7731ddb192355b59bdb82315 /drivers/net/team/team.c
parent     team: Do not hold rcu_read_lock when running netlink cmds (diff)
team: convert overall spinlock to mutex
There is no need to hold a spinlock for this purpose, so convert it to a mutex and avoid the current "scheduling while atomic" problems in the netlink code.

Signed-off-by: Jiri Pirko <jpirko@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
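For context, the pattern applied here can be illustrated with a minimal, hypothetical sketch (the demo_team names below are illustrative, not the real team.c code): a netlink command handler may sleep while team->lock is held, for example in a GFP_KERNEL allocation, which under a spinlock is a "scheduling while atomic" bug; under a mutex, sleeping with the lock held is legal.

/*
 * Minimal sketch of the before/after locking pattern this commit applies.
 * demo_* names are illustrative only; the real lock lives in struct team.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_team {
	struct mutex lock;		/* was: spinlock_t lock; */
	int some_state;
};

static void demo_team_init(struct demo_team *team)
{
	mutex_init(&team->lock);	/* was: spin_lock_init(&team->lock); */
}

static int demo_team_nl_cmd(struct demo_team *team)
{
	void *buf;

	mutex_lock(&team->lock);	/* was: spin_lock(&team->lock); */

	/* May sleep: legal under a mutex, a bug under a spinlock. */
	buf = kmalloc(256, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&team->lock);
		return -ENOMEM;
	}
	/* ... build and send the netlink reply ... */
	kfree(buf);

	mutex_unlock(&team->lock);	/* was: spin_unlock(&team->lock); */
	return 0;
}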
Diffstat (limited to 'drivers/net/team/team.c')
-rw-r--r--  drivers/net/team/team.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e5390c73a75d..7db219cd3153 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -443,9 +443,9 @@ static void __team_compute_features(struct team *team)
static void team_compute_features(struct team *team)
{
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
__team_compute_features(team);
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
}
static int team_port_enter(struct team *team, struct team_port *port)
@@ -647,7 +647,7 @@ static int team_init(struct net_device *dev)
int i;
team->dev = dev;
- spin_lock_init(&team->lock);
+ mutex_init(&team->lock);
team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
if (!team->pcpu_stats)
@@ -672,13 +672,13 @@ static void team_uninit(struct net_device *dev)
struct team_port *port;
struct team_port *tmp;
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
list_for_each_entry_safe(port, tmp, &team->port_list, list)
team_port_del(team, port->dev);
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
}
static void team_destructor(struct net_device *dev)
@@ -784,7 +784,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
* Although this is a reader, it's guarded by team lock. It's not possible
* to traverse list in reverse under rcu_read_lock
*/
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
err = dev_set_mtu(port->dev, new_mtu);
if (err) {
@@ -793,7 +793,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
goto unwind;
}
}
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
dev->mtu = new_mtu;
@@ -802,7 +802,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
dev_set_mtu(port->dev, dev->mtu);
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
return err;
}
@@ -880,9 +880,9 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
struct team *team = netdev_priv(dev);
int err;
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
err = team_port_add(team, port_dev);
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
return err;
}
@@ -891,9 +891,9 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
struct team *team = netdev_priv(dev);
int err;
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
err = team_port_del(team, port_dev);
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
return err;
}
@@ -1064,13 +1064,13 @@ static struct team *team_nl_team_get(struct genl_info *info)
}
team = netdev_priv(dev);
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
return team;
}
static void team_nl_team_put(struct team *team)
{
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
dev_put(team->dev);
}
@@ -1486,9 +1486,9 @@ static void team_port_change_check(struct team_port *port, bool linkup)
{
struct team *team = port->team;
- spin_lock(&team->lock);
+ mutex_lock(&team->lock);
__team_port_change_check(port, linkup);
- spin_unlock(&team->lock);
+ mutex_unlock(&team->lock);
}
/************************************
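The team_nl_team_get()/team_nl_team_put() hunk above is what motivates the change: the team lock is taken when a netlink command looks up the team and only released when the command finishes, so it is held across code that can sleep. A simplified, hedged sketch of that acquire-in-get / release-in-put pattern (demo_* names are illustrative; the real helpers are team_nl_team_get() and team_nl_team_put()):

/*
 * Sketch of holding the team mutex across an entire netlink command,
 * matching the last hunk of the diff. Not the actual team.c code.
 */
#include <linux/mutex.h>
#include <linux/netdevice.h>

struct demo_team {
	struct mutex lock;
	struct net_device *dev;
};

static struct demo_team *demo_nl_team_get(struct demo_team *team)
{
	dev_hold(team->dev);
	mutex_lock(&team->lock);	/* held for the whole netlink command */
	return team;
}

static void demo_nl_team_put(struct demo_team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

static int demo_nl_cmd_doit(struct demo_team *team)
{
	int err = 0;

	team = demo_nl_team_get(team);
	/*
	 * Everything here runs with team->lock held and may sleep
	 * (allocations, genetlink replies), which is why the lock must
	 * be a mutex rather than a spinlock.
	 */
	demo_nl_team_put(team);
	return err;
}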