*
  * Perform special MAC address swapping for fail_over_mac settings
  *
- * Called with RTNL, curr_slave_lock for write_bh.
+ * Called with RTNL.
  */
 static void bond_do_fail_over_mac(struct bonding *bond,
                                  struct slave *new_active,
                                  struct slave *old_active)
-       __releases(&bond->curr_slave_lock)
-       __acquires(&bond->curr_slave_lock)
 {
        u8 tmp_mac[ETH_ALEN];
        struct sockaddr saddr;
 
        switch (bond->params.fail_over_mac) {
        case BOND_FOM_ACTIVE:
-               if (new_active) {
-                       write_unlock_bh(&bond->curr_slave_lock);
+               if (new_active)
                        bond_set_dev_addr(bond->dev, new_active->dev);
-                       write_lock_bh(&bond->curr_slave_lock);
-               }
                break;
        case BOND_FOM_FOLLOW:
                /*
                if (!new_active)
                        return;
 
-               write_unlock_bh(&bond->curr_slave_lock);
-
                if (old_active) {
                        ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
                        ether_addr_copy(saddr.sa_data,
                        netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
                                   -rv, new_active->dev->name);
 out:
-               write_lock_bh(&bond->curr_slave_lock);
                break;
        default:
                netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
 static bool bond_should_change_active(struct bonding *bond)
 {
        struct slave *prim = rtnl_dereference(bond->primary_slave);
-       struct slave *curr = bond_deref_active_protected(bond);
+       struct slave *curr = rtnl_dereference(bond->curr_active_slave);
 
        if (!prim || !curr || curr->link != BOND_LINK_UP)
                return true;
  * because it is apparently the best available slave we have, even though its
  * updelay hasn't timed out yet.
  *
- * If new_active is not NULL, caller must hold curr_slave_lock for write_bh.
+ * Caller must hold RTNL.
  */
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 {
        struct slave *old_active;
 
-       old_active = rcu_dereference_protected(bond->curr_active_slave,
-                                              !new_active ||
-                                              lockdep_is_held(&bond->curr_slave_lock));
+       ASSERT_RTNL();
+
+       old_active = rtnl_dereference(bond->curr_active_slave);
 
        if (old_active == new_active)
                return;
                                        bond_should_notify_peers(bond);
                        }
 
-                       write_unlock_bh(&bond->curr_slave_lock);
-
                        call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
                        if (should_notify_peers)
                                call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
                                                         bond->dev);
-
-                       write_lock_bh(&bond->curr_slave_lock);
                }
        }
 
  * - The primary_slave has got its link back.
  * - A slave has got its link back and there's no old curr_active_slave.
  *
- * Caller must hold curr_slave_lock for write_bh.
+ * Caller must hold RTNL.
  */
 void bond_select_active_slave(struct bonding *bond)
 {
        int rv;
 
        best_slave = bond_find_best_slave(bond);
-       if (best_slave != bond_deref_active_protected(bond)) {
+       if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
                bond_change_active_slave(bond, best_slave);
                rv = bond_set_carrier(bond);
                if (!rv)
 
        if (bond_uses_primary(bond)) {
                block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
-               write_unlock_bh(&bond->curr_slave_lock);
                unblock_netpoll_tx();
        }
 
                RCU_INIT_POINTER(bond->primary_slave, NULL);
        if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
                block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
                bond_change_active_slave(bond, NULL);
                bond_select_active_slave(bond);
-               write_unlock_bh(&bond->curr_slave_lock);
                unblock_netpoll_tx();
        }
        /* either primary_slave or curr_active_slave might've changed */
        if (rtnl_dereference(bond->primary_slave) == slave)
                RCU_INIT_POINTER(bond->primary_slave, NULL);
 
-       if (oldcurrent == slave) {
-               write_lock_bh(&bond->curr_slave_lock);
+       if (oldcurrent == slave)
                bond_change_active_slave(bond, NULL);
-               write_unlock_bh(&bond->curr_slave_lock);
-       }
 
        if (bond_is_lb(bond)) {
                /* Must be called only after the slave has been
                 * is no concern that another slave add/remove event
                 * will interfere.
                 */
-               write_lock_bh(&bond->curr_slave_lock);
-
                bond_select_active_slave(bond);
-
-               write_unlock_bh(&bond->curr_slave_lock);
        }
 
        if (!bond_has_slaves(bond)) {
 do_failover:
                ASSERT_RTNL();
                block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
-               write_unlock_bh(&bond->curr_slave_lock);
                unblock_netpoll_tx();
        }
 
                if (slave_state_changed) {
                        bond_slave_state_change(bond);
                } else if (do_failover) {
-                       /* the bond_select_active_slave must hold RTNL
-                        * and curr_slave_lock for write.
-                        */
                        block_netpoll_tx();
-                       write_lock_bh(&bond->curr_slave_lock);
-
                        bond_select_active_slave(bond);
-
-                       write_unlock_bh(&bond->curr_slave_lock);
                        unblock_netpoll_tx();
                }
                rtnl_unlock();
 do_failover:
                ASSERT_RTNL();
                block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
-               write_unlock_bh(&bond->curr_slave_lock);
                unblock_netpoll_tx();
        }
 
                            primary ? slave_dev->name : "none");
 
                block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
-               write_unlock_bh(&bond->curr_slave_lock);
                unblock_netpoll_tx();
                break;
        case NETDEV_FEAT_CHANGE:
 
        /* reset slave->backup and slave->inactive */
        if (bond_has_slaves(bond)) {
-               read_lock(&bond->curr_slave_lock);
                bond_for_each_slave(bond, slave, iter) {
                        if (bond_uses_primary(bond) &&
                            slave != rcu_access_pointer(bond->curr_active_slave)) {
                                                            BOND_SLAVE_NOTIFY_NOW);
                        }
                }
-               read_unlock(&bond->curr_slave_lock);
        }
 
        bond_work_init_all(bond);
                if (!mii)
                        return -EINVAL;
 
-
                if (mii->reg_num == 1) {
                        mii->val_out = 0;
-                       read_lock(&bond->curr_slave_lock);
                        if (netif_carrier_ok(bond->dev))
                                mii->val_out = BMSR_LSTATUS;
-
-                       read_unlock(&bond->curr_slave_lock);
                }
 
                return 0;