From 94aa743a2af455ee3bd9fc3410dff82f6abf4522 Mon Sep 17 00:00:00 2001
From: Neerav Parikh
Date: Tue, 15 Jan 2013 15:42:38 -0800
Subject: fcoe: Fix deadlock while deleting FCoE interface with NPIV ports

This patch fixes the following deadlock, triggered by destroying an
FCoE interface that has active NPIV ports on it.

Call Trace:
 [] schedule+0x64/0x66
 [] schedule_timeout+0x36/0xe3
 [] ? update_curr+0xd6/0x110
 [] ? hrtick_update+0x1b/0x4d
 [] ? dequeue_task_fair+0x1ca/0x1d9
 [] ? need_resched+0x1e/0x28
 [] wait_for_common+0x9b/0xf1
 [] ? try_to_wake_up+0x1e0/0x1e0
 [] wait_for_completion+0x1d/0x1f
 [] flush_workqueue+0x116/0x2a1
 [] drain_workqueue+0x66/0x14c
 [] destroy_workqueue+0x1a/0xcf
 [] fc_remove_host+0x154/0x17f [scsi_transport_fc]
 [] fcoe_if_destroy+0x184/0x1c9 [fcoe]
 [] fcoe_destroy_work+0x2b/0x44 [fcoe]
 [] process_one_work+0x1a8/0x2a4
 [] ? fcoe_if_destroy+0x1c9/0x1c9 [fcoe]
 [] worker_thread+0x1db/0x268
 [] ? wake_up_bit+0x2a/0x2a
 [] ? manage_workers.clone.16+0x1f6/0x1f6
 [] kthread+0x6f/0x77
 [] kernel_thread_helper+0x4/0x10
 [] ? kthread_freezable_should_stop+0x4b/0x4b

Call Trace:
 [] schedule+0x64/0x66
 [] schedule_preempt_disabled+0xe/0x10
 [] __mutex_lock_common.clone.5+0x117/0x17a
 [] __mutex_lock_slowpath+0x13/0x15
 [] mutex_lock+0x23/0x37
 [] ? list_del+0x11/0x30
 [] fcoe_vport_destroy+0x43/0x5f [fcoe]
 [] fc_vport_terminate+0x48/0x110 [scsi_transport_fc]
 [] fc_vport_sched_delete+0x1d/0x79 [scsi_transport_fc]
 [] process_one_work+0x1a8/0x2a4
 [] ? fc_vport_terminate+0x110/0x110 [scsi_transport_fc]
 [] worker_thread+0x1db/0x268
 [] ? manage_workers.clone.16+0x1f6/0x1f6
 [] kthread+0x6f/0x77
 [] kernel_thread_helper+0x4/0x10
 [] ? kthread_freezable_should_stop+0x4b/0x4b
 [] ? gs_change+0x13/0x13

The first trace is fcoe_destroy_work() waiting, with fcoe_config_mutex
held, for the fc_host work queue to drain; the second is a queued NPIV
port delete work item on that same queue, blocked on fcoe_config_mutex
in fcoe_vport_destroy(). Neither thread can make progress.

A prior attempt to fix this issue was posted here:

http://lists.open-fcoe.org/pipermail/devel/2012-October/012318.html
or
http://article.gmane.org/gmane.linux.scsi.open-fcoe.devel/11924

Based on feedback and discussion with Neil Horman, the above patch may
allow fcoe_vport_destroy() and fcoe_destroy_work() to race; that patch
has therefore been withdrawn in favor of this one, which solves the
same problem in a different way. Instead of removing fcoe_config_mutex
from the vport_delete callback, this patch deletes all the NPIV ports
on a given root lport first, before continuing with the removal of the
root lport itself.
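For illustration only, here is a minimal userspace analogue of the
deadlock (hypothetical names; a pthread stands in for the workqueue
worker, so this is a sketch of the locking pattern, not kernel code).
Thread 1 plays the role of fcoe_destroy_work(): it takes the config
mutex and then waits for the worker to finish, the way
flush_workqueue() does. The worker plays fcoe_vport_destroy() and
blocks on the same mutex:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Stands in for the queued NPIV port delete work item. */
	static void *vport_delete_work(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&config_mutex);   /* fcoe_vport_destroy() blocks here */
		puts("vport destroyed");
		pthread_mutex_unlock(&config_mutex);
		return NULL;
	}

	int main(void)
	{
		pthread_t worker;

		pthread_mutex_lock(&config_mutex);   /* fcoe_destroy_work() takes the mutex */
		pthread_create(&worker, NULL, vport_delete_work, NULL);
		pthread_join(&worker, NULL);         /* "flush_workqueue()": waits forever */
		pthread_mutex_unlock(&config_mutex); /* never reached */
		return 0;
	}

The fix below is the equivalent of moving that join ahead of the mutex
acquisition: the NPIV delete work is queued and flushed before
fcoe_config_mutex is taken, so the delete work items can complete.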
Signed-off-by: Neerav Parikh
Tested-by: Marcus Dennis
Acked-by: Neil Horman
Signed-off-by: Robert Love
---
 drivers/scsi/fcoe/fcoe.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index d605700f68cb..b5d92fc93c70 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2196,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work)
 {
 	struct fcoe_port *port;
 	struct fcoe_interface *fcoe;
+	struct Scsi_Host *shost;
+	struct fc_host_attrs *fc_host;
+	unsigned long flags;
+	struct fc_vport *vport;
+	struct fc_vport *next_vport;
 
 	port = container_of(work, struct fcoe_port, destroy_work);
+	shost = port->lport->host;
+	fc_host = shost_to_fc_host(shost);
+
+	/* Loop through all the vports and mark them for deletion */
+	spin_lock_irqsave(shost->host_lock, flags);
+	list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+		if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+			continue;
+		} else {
+			vport->flags |= FC_VPORT_DELETING;
+			queue_work(fc_host_work_q(shost),
+				   &vport->vport_delete_work);
+		}
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+
+	flush_workqueue(fc_host_work_q(shost));
+
 	mutex_lock(&fcoe_config_mutex);
 
 	fcoe = port->priv;