We prefer static_branch_unlikely() over static_key_false() these days,
so convert rps_needed and rfs_needed from the old static_key API to the
new static branch API.
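
For reference, a minimal sketch of the new jump-label API (not part of
this patch; "my_key" is a hypothetical key used purely for illustration):

    #include <linux/jump_label.h>

    /* New-style key; starts disabled ("false"). */
    static DEFINE_STATIC_KEY_FALSE(my_key);

    static void hot_path(void)
    {
            /* On architectures with jump label support this compiles
             * to a patchable NOP, so the disabled case costs no
             * conditional branch or load on the fast path.
             */
            if (static_branch_unlikely(&my_key)) {
                    /* rarely taken slow path */
            }
    }

    static void set_feature(bool on)
    {
            if (on)
                    static_branch_inc(&my_key);   /* refcounted enable */
            else
                    static_branch_dec(&my_key);
    }

static_branch_inc()/static_branch_dec() are the refcounted counterparts
of static_key_slow_inc()/static_key_slow_dec(). The patch keeps the
open-coded struct static_key_false definitions for the two exported
symbols rather than switching to the DEFINE_STATIC_KEY_FALSE() /
DECLARE_STATIC_KEY_FALSE() helpers. As the hunks below show, the keys
are still driven from the existing control paths: writing an rps_cpus
mask under /sys/class/net/<dev>/queues/rx-<n>/ increments or decrements
rps_needed, and setting /proc/sys/net/core/rps_sock_flow_entries does
the same for both rps_needed and rfs_needed.
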
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-       if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+       if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
                /* Select queue was not called for the skbuff, so we extract the
                 * RPS hash and save it into the flow_table here.
                 */
 
 
 #ifdef CONFIG_RPS
 #include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
 #endif
 
 struct neighbour;
 
 static inline void sock_rps_record_flow(const struct sock *sk)
 {
 #ifdef CONFIG_RPS
-       if (static_key_false(&rfs_needed)) {
+       if (static_branch_unlikely(&rfs_needed)) {
                /* Reading sk->sk_rxhash might incur an expensive cache line
                 * miss.
                 *
 
 u32 rps_cpu_mask __read_mostly;
 EXPORT_SYMBOL(rps_cpu_mask);
 
-struct static_key rps_needed __read_mostly;
+struct static_key_false rps_needed __read_mostly;
 EXPORT_SYMBOL(rps_needed);
-struct static_key rfs_needed __read_mostly;
+struct static_key_false rfs_needed __read_mostly;
 EXPORT_SYMBOL(rfs_needed);
 
 static struct rps_dev_flow *

        }
 
 #ifdef CONFIG_RPS
-       if (static_key_false(&rps_needed)) {
+       if (static_branch_unlikely(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
 
        rcu_read_lock();
 #ifdef CONFIG_RPS
-       if (static_key_false(&rps_needed)) {
+       if (static_branch_unlikely(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
 
        rcu_read_lock();
 #ifdef CONFIG_RPS
-       if (static_key_false(&rps_needed)) {
+       if (static_branch_unlikely(&rps_needed)) {
                list_for_each_entry_safe(skb, next, head, list) {
                        struct rps_dev_flow voidflow, *rflow = &voidflow;
                        int cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
        rcu_assign_pointer(queue->rps_map, map);
 
        if (map)
-               static_key_slow_inc(&rps_needed);
+               static_branch_inc(&rps_needed);
        if (old_map)
-               static_key_slow_dec(&rps_needed);
+               static_branch_dec(&rps_needed);
 
        mutex_unlock(&rps_map_mutex);
 
 
                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
                        if (sock_table) {
-                               static_key_slow_inc(&rps_needed);
-                               static_key_slow_inc(&rfs_needed);
+                               static_branch_inc(&rps_needed);
+                               static_branch_inc(&rfs_needed);
                        }
                        if (orig_sock_table) {
-                               static_key_slow_dec(&rps_needed);
-                               static_key_slow_dec(&rfs_needed);
+                               static_branch_dec(&rps_needed);
+                               static_branch_dec(&rfs_needed);
                                synchronize_rcu();
                                vfree(orig_sock_table);
                        }