
[06/10] bna: Mboxq Flush When IOC Disabled

Message ID 1311358069-32067-7-git-send-email-rmody@brocade.com
State Accepted, archived
Delegated to: David Miller

Commit Message

Rasesh Mody July 22, 2011, 6:07 p.m. UTC
Change details:
 - If there is a command pending in the mailbox, bfa_ioc_mbox_queue() enqueues
   the mailbox command to a pending command queue. Entries in this queue are
   not flushed when the IOC is disabled. As a result, when the IOC is
   re-enabled, the stale entries in the pending command queue are posted to
   the mailbox. When these mailbox commands are processed by the FW and
   responses are sent, unexpected events are received by other modules' FSMs
   (e.g. bfa_msgq) which have not posted any mailbox commands after the IOC
   was enabled.
 - Flush the pending mailbox command queue when the IOC is disabled. Rename
   bfa_ioc_mbox_hbfail() to bfa_ioc_mbox_flush() and call bfa_ioc_mbox_flush()
   from bfa_iocpf_sm_disabled_entry(); a sketch of the flush follows below.

Signed-off-by: Rasesh Mody <rmody@brocade.com>
---
 drivers/net/bna/bfa_ioc.c |    7 ++++---
 1 files changed, 4 insertions(+), 3 deletions(-)

Patch

diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index a13b4c5..889039c 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -68,7 +68,7 @@  static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
 static void bfa_ioc_recover(struct bfa_ioc *ioc);
 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
@@ -938,6 +938,7 @@  bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
 {
+	bfa_ioc_mbox_flush(iocpf->ioc);
 	bfa_ioc_pf_disabled(iocpf->ioc);
 }
 
@@ -1054,7 +1055,7 @@  bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
 	/**
 	 * Flush any queued up mailbox requests.
 	 */
-	bfa_ioc_mbox_hbfail(iocpf->ioc);
+	bfa_ioc_mbox_flush(iocpf->ioc);
 	bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
@@ -1644,7 +1645,7 @@  bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
  * Cleanup any pending requests.
  */
 static void
-bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
 {
 	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
 	struct bfa_mbox_cmd *cmd;