xscore-y := xscore_impl.o xs_ud.o xscore_api.o xsmp.o \
xscore_stats.o xscore_uadm.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8040\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
vhba_attr.o vhba_wq.o vhba_proc.o vhba_stats.o vhba_ib.o \
vhba_scsi_intf.o vhba_align.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8040\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
obj-$(CONFIG_INFINIBAND_XSVNIC) := xsvnic.o
xsvnic-y := xsvnic_main.o xsvnic_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8040\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
xve-y := xve_main.o xve_verbs.o xve_multicast.o xve_ib.o xve_tables.o \
xve_ethtool.o xve_cm.o xve_stats.o
-ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8039\"
+ccflags-y += -DXSIGO_LOCAL_VERSION=\"6.0.r8040\"
ccflags-y += -DRDMA_PORT_LINK_LAYER_CHANGES -DHAS_SKB_ACCESS_FUNCTIONS
ccflags-y += -DSCSI_STRUCT_CHANGES -DSCSI_TIMEOUT_CHANGES -DLLE
ccflags-y += -DXG_FRAG_SIZE_PRESENT -DXG_FRAG_PAGE_PRESENT
char smac_addr[ETH_ALEN];
unsigned long state;
atomic_t ref_cnt;
+ atomic_t del_inprogress;
unsigned long last_refresh;
int hash_value;
u32 dqpn;
} while (0)
#define PRINT(level, x, fmt, arg...) \
- printk(level "%s: " fmt, MODULE_NAME, ##arg)
+	printk(level "%s: [PID%d] " fmt, MODULE_NAME, current->pid, ##arg)
#define XSMP_ERROR(fmt, arg...) \
PRINT(KERN_ERR, "XSMP", fmt, ##arg)
#define DRV_PRINT(fmt, arg...) \
PRINT(KERN_INFO, "DRV", fmt, ##arg)
#define xve_printk(level, priv, format, arg...) \
- printk(level "%s: " format "\n", \
+	printk(level "%s: [PID%d] " format "\n", \
((struct xve_dev_priv *) priv)->netdev->name, \
+ current->pid, \
## arg)
#define xve_warn(priv, format, arg...) \
xve_printk(KERN_WARNING, priv, format, ## arg)
do { \
if (xve_debug_level & level) { \
if (priv) \
- pr_info("%s: " format "\n", \
+ pr_info_ratelimited("%s: [PID%d] " format "\n",\
((struct xve_dev_priv *) priv)->netdev->name, \
+ current->pid, \
## arg); \
else \
- pr_info("XVE: " format "\n", ## arg); \
+			pr_info_ratelimited("XVE: " format "\n", ## arg);\
} \
} while (0)
void queue_age_work(struct xve_dev_priv *priv, int msecs);
void xve_mark_paths_invalid(struct net_device *dev);
-void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid);
+void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid,
+ struct xve_fwt_entry *fwt_entry);
struct xve_dev_priv *xve_intf_alloc(const char *format);
int xve_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
union ib_gid *gid, u32 qpn, char *smac, u16 vlan);
void xve_fwt_cleanup(struct xve_dev_priv *xvep);
int xve_advert_process(struct xve_dev_priv *priv, struct sk_buff *skb);
-struct xve_fwt_entry *xve_fwt_lookup(struct xve_fwt_s *xve_fwt, char *mac,
+struct xve_fwt_entry *xve_fwt_lookup(struct xve_dev_priv *priv, char *mac,
u16 vlan, int refresh);
void xve_fwt_put_ctx(struct xve_fwt_s *xve_fwt,
struct xve_fwt_entry *fwt_entry);
-struct xve_fwt_entry *xve_fwt_list(struct xve_fwt_s *xve_fwt, int val);
bool xve_fwt_entry_valid(struct xve_fwt_s *xve_fwt,
struct xve_fwt_entry *fwt_entry);
void xve_flush_l2_entries(struct net_device *netdev, struct xve_path *path);
ib_modify_qp(p->qp, &xve_cm_err_attr,
IB_QP_STATE);
xve_debug(DEBUG_CM_INFO, priv,
- "M%d QP[%x] TX completions pending[%d]",
+ "M%d QP[%x] TX Completions pending[%d]",
modify, qpnum, p->tx_head - p->tx_tail);
}
*/
if (p->path)
xve_flush_single_path_by_gid(dev,
- &p->path->pathrec.dgid);
+ &p->path->pathrec.dgid, NULL);
xve_cm_set(p->path, NULL);
xve_cm_tx_destroy(p);
netif_tx_lock_bh(dev);
/* Flush all Paths */
list_for_each_entry_safe(path, tp, &priv->path_list, list)
- xve_flush_single_path_by_gid(dev, &path->pathrec.dgid);
+ xve_flush_single_path_by_gid(dev, &path->pathrec.dgid, NULL);
return 0;
module_param(xve_eoib_mode, uint, 0444);
MODULE_PARM_DESC(xve_eoib_mode, "Always use UD mode irrespective of xsmp.vnet_mode value");
+static int xve_age_path = 1;
+module_param(xve_age_path, int, 0644);
+MODULE_PARM_DESC(xve_age_path, "Age path enable/disable if no fwt entries");
+
static void xve_send_msg_to_xsigod(xsmp_cookie_t xsmp_hndl, void *data,
int len);
struct xve_path_iter {
netdev = path->dev;
priv = netdev_priv(netdev);
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Freeing the path %p",
+ __func__, path);
while ((skb = __skb_dequeue(&path->queue)))
dev_kfree_skb_irq(skb);
}
-void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid)
+void xve_flush_single_path_by_gid(struct net_device *dev, union ib_gid *gid,
+ struct xve_fwt_entry *fwt_entry)
{
struct xve_dev_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
struct xve_path *path;
+ uint8_t path_ret = 0;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
char *mgid_token = gid->raw;
char tmp_buf[64];
- xve_debug(DEBUG_FLUSH_INFO, priv, "%s Path not found\n",
- __func__);
print_mgid_buf(tmp_buf, mgid_token);
- xve_debug(DEBUG_FLUSH_INFO, priv, "%s MGID %s\n",
- __func__, tmp_buf);
- spin_unlock_irqrestore(&priv->lock, flags);
- netif_tx_unlock_bh(dev);
- return;
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Path not found MGID %s",
+ __func__, tmp_buf);
+ path_ret = 1;
+ }
+
+
+ if (fwt_entry != NULL) {
+ xve_remove_fwt_entry(priv, fwt_entry);
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Fwt removed %p",
+ __func__, fwt_entry);
+ /*
+ * There is more than one FWT entry in this path,
+ * destroy just this FWT entry.
+ */
+ if ((path && !list_empty(&path->fwt_list)) || !xve_age_path) {
+			xve_info(priv, "path %p has more entries FWT %p",
+				 path, fwt_entry);
+ path_ret = 1;
+ }
}
- xve_debug(DEBUG_FLUSH_INFO, priv, "%s Flushing the path %p\n",
+ if (path_ret)
+ goto unlock;
+
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Flushing the path %p",
__func__, path);
+ /* This path is not used in subsequent path_look ups's */
rb_erase(&path->rb_node, &priv->path_tree);
if (path->query)
ib_sa_cancel_query(path->query_id, path->query);
xve_free_path(path);
}
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Flushed the path %p",
+ __func__, path);
timeout:
return;
+unlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+ return;
+
}
static void path_rec_completion(int status,
struct xve_path *path;
unsigned long flags = 0;
- xve_debug(DEBUG_TABLE_INFO, priv, "%s Adding FWT to list %p\n",
+ xve_debug(DEBUG_TABLE_INFO, priv, "%s Adding FWT to list %p",
__func__, fwt_entry);
path = xve_find_path_by_gid(priv, &fwt_entry->dgid);
if (!path)
}
len = skb->len;
- fwt_entry = xve_fwt_lookup(&priv->xve_fwt, eth_hdr(skb)->h_dest,
+ fwt_entry = xve_fwt_lookup(priv, eth_hdr(skb)->h_dest,
vlan_tag, 0);
if (!fwt_entry) {
if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
send_ack:
ret = xve_xsmp_send_ack(priv, xmsgp);
if (ret) {
- XSMP_ERROR("%s: xve_xsmp_send_ack error name: %s, VID=0x%llx\n",
- __func__, xmsgp->xve_name,
- be64_to_cpu(xmsgp->resource_id));
+ xve_info(priv, "%s: xve_xsmp_send_ack error name VID=0x%llx",
+ __func__, be64_to_cpu(xmsgp->resource_id));
}
if (update_state && priv->vnic_type == XSMP_XCM_OVN) {
xve_info(priv, "Sending Oper state to chassis for id %llx\n",
if (send_ack) {
ret = xve_xsmp_send_ack(priv, xmsgp);
if (ret) {
- XSMP_ERROR("%s: xve_xsmp_send_ack error name: %s\n"
- "VID=0x%llx\n", __func__, xmsgp->xve_name,
- be64_to_cpu(xmsgp->resource_id));
+ xve_info(priv, "%s: error name VID=0x%llx",
+ __func__, be64_to_cpu(xmsgp->resource_id));
}
}
mutex_unlock(&priv->mutex);
ret = xve_xsmp_send_ack(priv, xmsgp);
if (ret) {
- XSMP_ERROR("%s: xve_xsmp_send_ack error name: %s, VID=0x%llx\n",
- __func__, xmsgp->xve_name,
- be64_to_cpu(xmsgp->resource_id));
+ xve_info(priv, "%s: xve_xsmp_send_ack error name VID=0x%llx",
+ __func__, be64_to_cpu(xmsgp->resource_id));
}
(void) xve_xsmp_handle_oper_req(priv->xsmp_hndl,
#include <linux/pkt_sched.h>
#include <linux/random.h>
-static int xve_age_path = 1;
-module_param(xve_age_path, int, 0644);
-MODULE_PARM_DESC(xve_age_path, "Age path enable/disable if no fwt entries");
u32 xve_hash_salt __read_mostly;
static struct kmem_cache *xve_fwt_cache __read_mostly;
return NULL;
}
-static struct xve_fwt_entry *xve_fwt_find_valid(struct hlist_head *head)
-{
- struct xve_fwt_entry *fwt_entry;
-
- hlist_for_each_entry(fwt_entry, head, hlist) {
- if (test_bit(XVE_FWT_ENTRY_VALID, &fwt_entry->state))
- return fwt_entry;
- }
- return NULL;
-}
-
-struct xve_fwt_entry *xve_fwt_list(struct xve_fwt_s *xve_fwt, int val)
-{
- struct hlist_head *head;
- struct xve_fwt_entry *fwt_entry = NULL;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&xve_fwt->lock, flags);
- head = &xve_fwt->fwt[val];
- if (head != NULL)
- fwt_entry = xve_fwt_find_valid(head);
- if (fwt_entry)
- atomic_inc(&fwt_entry->ref_cnt);
- spin_unlock_irqrestore(&xve_fwt->lock, flags);
- return fwt_entry;
-}
-
bool xve_fwt_entry_valid(struct xve_fwt_s *xve_fwt,
struct xve_fwt_entry *fwt_entry)
{
struct xve_fwt_s *xve_fwt = &priv->xve_fwt;
int i;
char *smac;
- union ib_gid dgid;
- int is_list_empty = 0;
struct hlist_head *head;
struct hlist_node *n;
hlist_for_each_entry_safe(fwt_entry, n, head, hlist) {
if (xve_fwt_entry_valid(xve_fwt, fwt_entry) == true) {
smac = fwt_entry->smac_addr;
+ spin_lock_irqsave(&priv->lock, flags);
if (!test_and_clear_bit
(XVE_FWT_ENTRY_REFRESH, &fwt_entry->state)
&& ((jiffies - fwt_entry->last_refresh) >=
priv->aging_delay)) {
xve_info(priv,
- "MAC %pM vlan %d Aged out",
+ "MAC %pM vlan %d Aged[D] out",
smac, fwt_entry->vlan);
- /*
- * Can there be a race here where path
- * becomes a bad address when paths
- * gets flushed??
- */
- spin_lock_irqsave(&priv->lock, flags);
- xve_remove_fwt_entry(priv, fwt_entry);
+ atomic_set(&fwt_entry->del_inprogress,
+ 1);
path = fwt_entry->path;
- if (path)
- memcpy(dgid.raw,
- path->pathrec.dgid.raw,
- sizeof(dgid));
- if (path && list_empty(&path->fwt_list))
- is_list_empty = 1;
- spin_unlock_irqrestore(&priv->lock,
- flags);
- if (xve_age_path && is_list_empty)
- xve_flush_single_path_by_gid
- (priv->netdev, &dgid);
- xve_fwt_put_ctx(xve_fwt, fwt_entry);
- xve_fwt_entry_free(priv, fwt_entry);
+ if (path) {
+ spin_unlock_irqrestore(
+ &priv->lock, flags);
+ xve_flush_single_path_by_gid(
+ priv->netdev,
+ &path->pathrec.dgid,
+ fwt_entry);
+ spin_lock_irqsave(&priv->lock,
+ flags);
+ xve_fwt_put_ctx(xve_fwt,
+ fwt_entry);
+ xve_fwt_entry_free(priv,
+ fwt_entry);
+ } else
+ xve_remove_fwt_entry(priv,
+ fwt_entry);
+
priv->counters[XVE_MAC_AGED_COUNTER]++;
} else {
priv->counters[XVE_MAC_STILL_INUSE]++;
xve_fwt_put_ctx(xve_fwt, fwt_entry);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
priv->counters[XVE_MAC_AGED_NOMATCHES]++;
}
return 0;
}
-struct xve_fwt_entry *xve_fwt_lookup(struct xve_fwt_s *xve_fwt, char *mac,
+struct xve_fwt_entry *xve_fwt_lookup(struct xve_dev_priv *priv, char *mac,
u16 vlan, int refresh)
{
+ struct xve_fwt_s *xve_fwt = &priv->xve_fwt;
+ struct xve_fwt_entry *fwt_entry;
unsigned long flags;
struct hlist_head *head;
- struct xve_fwt_entry *fwt_entry;
spin_lock_irqsave(&xve_fwt->lock, flags);
head = &xve_fwt->fwt[xve_mac_hash(mac, XVE_FWT_HASH_LISTS, vlan)];
+ xve_debug(DEBUG_TABLE_INFO, priv,
+		  "Hash value %d %pM vlan %d entries %d",
+ xve_mac_hash(mac, XVE_FWT_HASH_LISTS, vlan),
+ mac, vlan,
+ xve_fwt->num);
fwt_entry = xve_fwt_find_entry(head, mac, vlan);
+
if (fwt_entry) {
+	if (atomic_read(&fwt_entry->del_inprogress)) {
+		xve_info(priv, "%p Table delete in progress mac %pM",
+			 fwt_entry, mac);
+		/* drop xve_fwt->lock taken above; returning here leaked it */
+		spin_unlock_irqrestore(&xve_fwt->lock, flags);
+		return NULL;
+	}
atomic_inc(&fwt_entry->ref_cnt);
if (refresh)
set_bit(XVE_FWT_ENTRY_REFRESH, &fwt_entry->state);
fwt_entry->last_refresh = jiffies;
- } else {
- xve_debug(DEBUG_TABLE_INFO, NULL,
- "%s No match for %02x%02x%02x%02x%02x%02x vlan %d\n",
- __func__, mac[0], mac[1], mac[2], mac[3], mac[4],
- mac[5], vlan);
}
spin_unlock_irqrestore(&xve_fwt->lock, flags);
return fwt_entry;
!memcmp(&gid->raw, &priv->gw.t_gid.raw, sizeof(*gid)))
qpn = priv->gw.t_data_qp;
- fwt_entry = xve_fwt_lookup(xve_fwt, smac, vlan, 1);
+ /* Get a FWT entry for this mac and vlan */
+ spin_lock_irqsave(&priv->lock, flags);
+ fwt_entry = xve_fwt_lookup(priv, smac, vlan, 1);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
if (fwt_entry) {
if (unlikely
(memcmp
unsigned long flags = 0;
spin_lock_irqsave(&xve_fwt->lock, flags);
- xve_debug(DEBUG_TABLE_INFO, priv, "%s Deleting FWT From list %p\n",
- __func__, fwt_entry);
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Deleting FWT[%d] From list %p",
+ __func__, xve_fwt->num, fwt_entry);
if (fwt_entry->path)
list_del(&fwt_entry->list);
hlist_del(&fwt_entry->hlist);
struct xve_fwt_entry *fwt_entry)
{
unsigned long begin;
+ unsigned long flags = 0;
/*
	 * Wait for reference count to go to zero (use kref, which is better)
*/
begin = jiffies;
+ xve_debug(DEBUG_FLUSH_INFO, priv, "%s Free cache ,FWT %p cnt%d",
+ __func__, fwt_entry, atomic_read(&fwt_entry->ref_cnt));
while (atomic_read(&fwt_entry->ref_cnt)) {
- xve_debug(DEBUG_TABLE_INFO, priv,
- "%s Waiting for ref cnt to become zero %p\n",
- __func__, fwt_entry);
if (time_after(jiffies, begin + 5 * HZ)) {
xve_warn(priv,
- "timing out fwt_entry still in use %p\n",
+ "timing out fwt_entry still in use %p",
fwt_entry);
break;
}
- msleep(20);
+		/*
+		 * NOTE(review): spin_is_locked() reports whether *any* context
+		 * holds the lock, not whether this one does, and the local
+		 * 'flags' (0) is not the caller's saved IRQ state — this
+		 * conditional unlock/restore is unsound. msleep() while a
+		 * caller holds priv->lock is invalid in any case; confirm the
+		 * callers' locking contract and rework instead of probing.
+		 */
+ if (spin_is_locked(&priv->lock)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ msleep(20);
+ spin_lock_irqsave(&priv->lock, flags);
+ } else
+ msleep(20);
}
kmem_cache_free(xve_fwt_cache, fwt_entry);
}