All read_lock users of sched_lock are gone, so convert it from rwlock_t to spinlock_t; with no readers left, a plain spin lock is preferred.
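
The conversion follows the usual pattern once a reader/writer lock has no readers: the field type changes to spinlock_t, rwlock_init() becomes spin_lock_init(), and every write_lock{_bh}/write_unlock{_bh} pair becomes spin_lock{_bh}/spin_unlock{_bh}. A minimal sketch of the pattern (illustrative only, not the real ip_vs_service layout):

	/* Illustrative sketch only -- not the actual IPVS structures. */
	struct example_svc {
		spinlock_t	sched_lock;	/* was rwlock_t; protects sched_data */
		void		*sched_data;
	};

	static void example_init(struct example_svc *svc)
	{
		spin_lock_init(&svc->sched_lock);	/* was rwlock_init() */
	}

	static void example_update(struct example_svc *svc, void *data)
	{
		/* was write_lock_bh()/write_unlock_bh() */
		spin_lock_bh(&svc->sched_lock);
		svc->sched_data = data;
		spin_unlock_bh(&svc->sched_lock);
	}
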
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
 
        /* for scheduling */
        struct ip_vs_scheduler  *scheduler;    /* bound scheduler object */
-       rwlock_t                sched_lock;    /* lock sched_data */
+       spinlock_t              sched_lock;    /* lock sched_data */
        void                    *sched_data;   /* scheduler application data */
 
        /* alternate persistence engine */
 
        svc->net = net;
 
        INIT_LIST_HEAD(&svc->destinations);
-       rwlock_init(&svc->sched_lock);
+       spin_lock_init(&svc->sched_lock);
        spin_lock_init(&svc->stats.lock);
 
        /* Bind the scheduler */
 
 
 /*
  * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
- * address to a server. Called under write lock.
+ * address to a server. Called under spin lock.
  */
 static inline struct ip_vs_lblc_entry *
 ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
        struct hlist_node *next;
        int i;
 
-       write_lock_bh(&svc->sched_lock);
+       spin_lock_bh(&svc->sched_lock);
        tbl->dead = 1;
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
                        atomic_dec(&tbl->entries);
                }
        }
-       write_unlock_bh(&svc->sched_lock);
+       spin_unlock_bh(&svc->sched_lock);
 }
 
 static int sysctl_lblc_expiration(struct ip_vs_service *svc)
        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
-               write_lock(&svc->sched_lock);
+               spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_before(now,
                                        en->lastuse +
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
-               write_unlock(&svc->sched_lock);
+               spin_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
 }
        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
-               write_lock(&svc->sched_lock);
+               spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;
                        atomic_dec(&tbl->entries);
                        goal--;
                }
-               write_unlock(&svc->sched_lock);
+               spin_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        }
 
        /* If we fail to create a cache entry, we'll just use the valid dest */
-       write_lock(&svc->sched_lock);
+       spin_lock(&svc->sched_lock);
        if (!tbl->dead)
                ip_vs_lblc_new(tbl, &iph.daddr, dest);
-       write_unlock(&svc->sched_lock);
+       spin_unlock(&svc->sched_lock);
 
 out:
        IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
 
 
 /*
  * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
- * IP address to a server. Called under write lock.
+ * IP address to a server. Called under spin lock.
  */
 static inline struct ip_vs_lblcr_entry *
 ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
        struct ip_vs_lblcr_entry *en;
        struct hlist_node *next;
 
-       write_lock_bh(&svc->sched_lock);
+       spin_lock_bh(&svc->sched_lock);
        tbl->dead = 1;
        for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
-       write_unlock_bh(&svc->sched_lock);
+       spin_unlock_bh(&svc->sched_lock);
 }
 
 static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
-               write_lock(&svc->sched_lock);
+               spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse +
                                       sysctl_lblcr_expiration(svc), now))
                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
-               write_unlock(&svc->sched_lock);
+               spin_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
 }
        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
-               write_lock(&svc->sched_lock);
+               spin_lock(&svc->sched_lock);
                hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
                                continue;
                        atomic_dec(&tbl->entries);
                        goal--;
                }
-               write_unlock(&svc->sched_lock);
+               spin_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                                sysctl_lblcr_expiration(svc))) {
-                       write_lock(&svc->sched_lock);
+                       spin_lock(&svc->sched_lock);
                        if (atomic_read(&en->set.size) > 1) {
                                struct ip_vs_dest *m;
 
                                if (m)
                                        ip_vs_dest_set_erase(&en->set, m);
                        }
-                       write_unlock(&svc->sched_lock);
+                       spin_unlock(&svc->sched_lock);
                }
 
                /* If the destination is not overloaded, use it */
                }
 
                /* Update our cache entry */
-               write_lock(&svc->sched_lock);
+               spin_lock(&svc->sched_lock);
                if (!tbl->dead)
                        ip_vs_dest_set_insert(&en->set, dest, true);
-               write_unlock(&svc->sched_lock);
+               spin_unlock(&svc->sched_lock);
                goto out;
        }
 
        }
 
        /* If we fail to create a cache entry, we'll just use the valid dest */
-       write_lock(&svc->sched_lock);
+       spin_lock(&svc->sched_lock);
        if (!tbl->dead)
                ip_vs_lblcr_new(tbl, &iph.daddr, dest);
-       write_unlock(&svc->sched_lock);
+       spin_unlock(&svc->sched_lock);
 
 out:
        IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
 
 {
        struct list_head *p;
 
-       write_lock_bh(&svc->sched_lock);
+       spin_lock_bh(&svc->sched_lock);
        p = (struct list_head *) svc->sched_data;
        /* dest is already unlinked, so p->prev is not valid but
         * p->next is valid, use it to reach previous entry.
         */
        if (p == &dest->n_list)
                svc->sched_data = p->next->prev;
-       write_unlock_bh(&svc->sched_lock);
+       spin_unlock_bh(&svc->sched_lock);
        return 0;
 }
 
 
        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
-       write_lock(&svc->sched_lock);
+       spin_lock(&svc->sched_lock);
        p = (struct list_head *) svc->sched_data;
        last = dest = list_entry(p, struct ip_vs_dest, n_list);
 
        } while (pass < 2 && p != &svc->destinations);
 
 stop:
-       write_unlock(&svc->sched_lock);
+       spin_unlock(&svc->sched_lock);
        ip_vs_scheduler_err(svc, "no destination available");
        return NULL;
 
   out:
        svc->sched_data = &dest->n_list;
-       write_unlock(&svc->sched_lock);
+       spin_unlock(&svc->sched_lock);
        IP_VS_DBG_BUF(6, "RR: server %s:%u "
                      "activeconns %d refcnt %d weight %d\n",
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
 
 {
        struct ip_vs_wrr_mark *mark = svc->sched_data;
 
-       write_lock_bh(&svc->sched_lock);
+       spin_lock_bh(&svc->sched_lock);
        mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
        mark->di = ip_vs_wrr_gcd_weight(svc);
        mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
                mark->cw = mark->mw;
        else if (mark->di > 1)
                mark->cw = (mark->cw / mark->di) * mark->di + 1;
-       write_unlock_bh(&svc->sched_lock);
+       spin_unlock_bh(&svc->sched_lock);
        return 0;
 }
 
 
        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
 
-       write_lock(&svc->sched_lock);
+       spin_lock(&svc->sched_lock);
        dest = mark->cl;
        /* No available dests? */
        if (mark->mw == 0)
        mark->cl = dest;
 
   out:
-       write_unlock(&svc->sched_lock);
+       spin_unlock(&svc->sched_lock);
        return dest;
 
 err_noavail: