EXPORT_SYMBOL(__percpu_counter_compare);
 
 /*
- * Compare counter, and add amount if the total is within limit.
- * Return true if amount was added, false if it would exceed limit.
+ * Compare counter, and add amount if total is: less than or equal to limit if
+ * amount is positive, or greater than or equal to limit if amount is negative.
+ * Return true if amount is added, or false if total would be beyond the limit.
+ *
+ * Negative limit is allowed, but unusual.
+ * When negative amounts (subs) are given to percpu_counter_limited_add(),
+ * the limit would most naturally be 0 - but other limits are also allowed.
+ *
+ * Overflow beyond S64_MAX is not allowed: counter, limit and amount
+ * are all assumed to be sane (far from S64_MIN and S64_MAX).
  */
 bool __percpu_counter_limited_add(struct percpu_counter *fbc,
                                   s64 limit, s64 amount, s32 batch)
 {
        s64 count;
        s64 unknown;
        unsigned long flags;
-       bool good;
+       bool good = false;
 
-       if (amount > limit)
-               return false;
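+       /* Adding 0 can never cross a limit; succeed before the sign checks */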
+       if (amount == 0)
+               return true;
 
        local_irq_save(flags);
        unknown = batch * num_online_cpus();
        count = __this_cpu_read(*fbc->counters);
 
        /* Skip taking the lock when safe */
        if (abs(count + amount) <= batch &&
-           fbc->count + unknown <= limit) {
+           ((amount > 0 && fbc->count + unknown <= limit) ||
+            (amount < 0 && fbc->count - unknown >= limit))) {
                this_cpu_add(*fbc->counters, amount);
                local_irq_restore(flags);
                return true;
        }
 
        raw_spin_lock(&fbc->lock);
        count = fbc->count + amount;
 
        /* Skip percpu_counter_sum() when safe */
-       if (count + unknown > limit) {
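+       /*
+        * The true total lies in [count - unknown, count + unknown]:
+        * if even the best case is beyond limit, fail without summing;
+        * if even the worst case is within limit, succeed without summing.
+        */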
+       if (amount > 0) {
+               if (count - unknown > limit)
+                       goto out;
+               if (count + unknown <= limit)
+                       good = true;
+       } else {
+               if (count + unknown < limit)
+                       goto out;
+               if (count - unknown >= limit)
+                       good = true;
+       }
+
+       if (!good) {
                s32 *pcount;
                int cpu;
 
                for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
                        pcount = per_cpu_ptr(fbc->counters, cpu);
                        count += *pcount;
                }
+               if (amount > 0) {
+                       if (count > limit)
+                               goto out;
+               } else {
+                       if (count < limit)
+                               goto out;
+               }
+               good = true;
        }
 
-       good = count <= limit;
-       if (good) {
-               count = __this_cpu_read(*fbc->counters);
-               fbc->count += count + amount;
-               __this_cpu_sub(*fbc->counters, count);
-       }
-
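+       /* Within limit: fold this cpu's delta into fbc->count, then add amount */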
+       count = __this_cpu_read(*fbc->counters);
+       fbc->count += count + amount;
+       __this_cpu_sub(*fbc->counters, count);
+out:
        raw_spin_unlock(&fbc->lock);
        local_irq_restore(flags);
        return good;
 }
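
Beyond the diff, a minimal usage sketch of the two-sided semantics. It
assumes the percpu_counter_limited_add() wrapper defined alongside
__percpu_counter_limited_add() in this series; the function and variable
names here (my_charge, my_uncharge, my_quota) are hypothetical, not taken
from the patch:

	#include <linux/percpu_counter.h>

	static int my_charge(struct percpu_counter *counter, s64 my_quota,
			     long nr_pages)
	{
		/* amount > 0: added only if the new total stays <= my_quota */
		if (!percpu_counter_limited_add(counter, my_quota, nr_pages))
			return -ENOSPC;
		return 0;
	}

	static void my_uncharge(struct percpu_counter *counter, long nr_pages)
	{
		/* amount < 0 with limit 0: the new total must stay >= 0 */
		WARN_ON_ONCE(!percpu_counter_limited_add(counter, 0, -nr_pages));
	}

As the comment above notes, a negative amount most naturally pairs with
limit 0, so the counter behaves like a resource count that can be drained
but never driven below zero.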