author	Hal Rosenstock <halr@voltaire.com>	2005-07-27 11:45:33 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 16:26:12 -0700
commit	cabe3cbcbb3b09637b9e706c49eadb180fca057e (patch)
tree	37c9179b4f43d7a63e7d55ae6a77a9fb44537b0c /drivers
parent	29bb33dd87dbe8db07c2b19df3fb453d999c96de (diff)
[PATCH] IB: Fix a couple of MAD code paths
Fixed locking to handle errors when posting MAD send work requests.
Fixed handling of canceling a MAD with an active work request.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
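To see the first fix in context: ib_send_mad() now issues ib_post_send() while the send queue lock is still held, and only increments send_queue.count and links the MAD onto a list once the post is known to have succeeded (or the MAD has been deferred to the overflow list). The following is a minimal sketch of the function as it looks after the patch, reconstructed from the hunks below; the qp_info lookup at the top is an assumption (it lies outside the changed lines) and unrelated setup such as work request ID handling is elided.

static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct ib_send_wr *bad_send_wr;
	struct list_head *list;
	unsigned long flags;
	int ret;

	qp_info = mad_send_wr->mad_agent_priv->qp_info;	/* assumed, not part of this diff */
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		/* Room on the QP: post the work request while the lock is held. */
		ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
				   &mad_send_wr->send_wr, &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		/* Send queue is full: defer the post, park the MAD on overflow_list. */
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		/* Count and track the MAD only if posting (or deferring) succeeded. */
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	return ret;
}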
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/core/mad.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1d8f26f54ec..8216af0ba78 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -841,6 +841,7 @@ static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_mad_qp_info *qp_info;
 	struct ib_send_wr *bad_send_wr;
+	struct list_head *list;
 	unsigned long flags;
 	int ret;
 
@@ -850,22 +851,20 @@ static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
-	if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
-		list_add_tail(&mad_send_wr->mad_list.list,
-			      &qp_info->send_queue.list);
-		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
+	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
 		ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
 				   &mad_send_wr->send_wr, &bad_send_wr);
-		if (ret) {
-			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
-			dequeue_mad(&mad_send_wr->mad_list);
-		}
+		list = &qp_info->send_queue.list;
 	} else {
-		list_add_tail(&mad_send_wr->mad_list.list,
-			      &qp_info->overflow_list);
-		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 		ret = 0;
+		list = &qp_info->overflow_list;
 	}
+
+	if (!ret) {
+		qp_info->send_queue.count++;
+		list_add_tail(&mad_send_wr->mad_list.list, list);
+	}
+	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 	return ret;
 }
 
@@ -2023,8 +2022,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
 }
 
 static struct ib_mad_send_wr_private*
-find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
-		   u64 wr_id)
+find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 
@@ -2047,6 +2045,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
 	unsigned long flags;
+	int active;
 
 	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
 				      agent);
@@ -2057,13 +2056,14 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 		return -EINVAL;
 	}
 
+	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
 	if (!timeout_ms) {
 		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
 		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
 	}
 
 	mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
-	if (!mad_send_wr->timeout || mad_send_wr->refcount > 1)
+	if (active)
 		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
 	else
 		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
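For the second fix, ib_modify_mad() now decides up front, in the new local 'active' (declared at the top of the function in the hunk at line 2045), whether the MAD still has an active work request, before a zero timeout_ms flushes the send and drops its reference; previously that test was made after the refcount had already been adjusted, so canceling a MAD with an active work request could take the wrong path. A minimal sketch of the changed tail of ib_modify_mad(), with the agent lookup, the find_send_by_wr_id() validation and the surrounding locking elided (those parts are outside or only partially visible in the hunks above):

	/* Sample the state before a zero timeout drops the reference below. */
	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		/* Canceling: flush the send and drop the timeout reference, if any. */
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
	if (active)
		/* Work request still outstanding: just record the new timeout. */
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		/* No active work request: reset the timeout handling for the new value. */
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);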