rb_insert_color(&new->node, sl_id_map);
}
+/* Try to (re)schedule the delayed work.
+ *
+ * If it was not scheduled before, schedule it; otherwise, try to cancel it
+ * first and reschedule only if the cancellation succeeds.
+ *
+ * The caller must hold sriov->going_down_lock and sriov->id_map_lock.
+ *
+ * Returns 1 when the work was (re)scheduled successfully, 0 otherwise.
+ */
+static int __try_reschedule(struct id_map_entry *ent)
+{
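+ /* not scheduled yet: mark it and queue a fresh delayed work */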
+ if (!ent->scheduled_delete) {
+ ent->scheduled_delete = 1;
+ schedule_delayed_work(&ent->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ return 1;
+ }
+
+ /* try to cancel delayed work */
+ if (cancel_delayed_work(&ent->timeout)) {
+ /* the work was successfully cancelled and is not running,
+ * so it is safe to queue another one */
+ schedule_delayed_work(&ent->timeout, CM_CLEANUP_CACHE_TIMEOUT);
+ return 1;
+ }
+
+ /* the timeout() work may be running; don't queue another one */
+ return 0;
+}
+
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
unsigned long flags;

spin_lock(&sriov->id_map_lock);
spin_lock_irqsave(&sriov->going_down_lock, flags);
/* make sure that there is no schedule inside the scheduled work. */
- if (!sriov->is_going_down) {
- id->scheduled_delete = 1;
- schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
- }
+ if (!sriov->is_going_down)
+ __try_reschedule(id);
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock);
}
ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
if (ent && sched_or_cancel) {
if (sched_or_cancel == ID_SCHED_DELETE) {
- cancel_delayed_work(&ent->timeout);
- ent->scheduled_delete = 1;
- schedule_delayed_work(&ent->timeout,
- CM_CLEANUP_CACHE_TIMEOUT);
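+ /* rescheduling failed: the timeout work is already running,
+ * so don't hand the entry back to the caller */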
+ if (!__try_reschedule(ent))
+ ent = NULL;
}
if (sched_or_cancel == ID_CANCEL_DELETE) {
- ent->scheduled_delete = 0;
- cancel_delayed_work(&ent->timeout);
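+ /* clear the flag only if the work was actually cancelled;
+ * otherwise the timeout work is running and the entry must
+ * not be reused */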
+ if (cancel_delayed_work(&ent->timeout))
+ ent->scheduled_delete = 0;
+ else
+ ent = NULL;
}
}
spin_unlock(&sriov->id_map_lock);
struct rb_root *sl_id_map = &sriov->sl_id_map;
struct list_head lh;
struct rb_node *nd;
- int need_flush = 1;
+ int no_flush = 1;
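+ /* no_flush stays set only while every pending timeout is
+ * cancelled successfully; one failed cancel means some work
+ * is running and must be flushed before the cleanup below */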
struct id_map_entry *map, *tmp_map;
/* cancel all delayed work queue entries */
INIT_LIST_HEAD(&lh);
list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
if (slave < 0 || slave == map->slave_id) {
if (map->scheduled_delete)
- need_flush &= !!cancel_delayed_work(&map->timeout);
+ no_flush &= !!cancel_delayed_work(&map->timeout);
}
}
spin_unlock(&sriov->id_map_lock);
- if (!need_flush)
+ if (!no_flush)
flush_scheduled_work(); /* make sure all timers were flushed */
/* now, remove all leftover entries from databases */