]> www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
locking/local_lock, mm: Replace localtry_ helpers with local_trylock_t type
authorAlexei Starovoitov <ast@kernel.org>
Tue, 1 Apr 2025 20:52:45 +0000 (13:52 -0700)
committerVlastimil Babka <vbabka@suse.cz>
Wed, 2 Apr 2025 08:12:18 +0000 (10:12 +0200)
Partially revert commit 0aaddfb06882 ("locking/local_lock: Introduce localtry_lock_t").
Remove localtry_*() helpers, since the localtry_lock() name might
be misinterpreted as "try lock".

Introduce local_trylock[_irqsave]() helpers that only work
with newly introduced local_trylock_t type.
Note that an attempt to use local_trylock[_irqsave]() with local_lock_t
will cause a compilation failure.

Usage and behavior in !PREEMPT_RT:

local_lock_t lock;                     // sizeof(lock) == 0
local_lock(&lock);                     // preempt disable
local_lock_irqsave(&lock, ...);        // irq save
if (local_trylock_irqsave(&lock, ...)) // compilation error

local_trylock_t lock;                  // sizeof(lock) == 4
local_lock(&lock);                     // preempt disable, acquired = 1
local_lock_irqsave(&lock, ...);        // irq save, acquired = 1
if (local_trylock(&lock))              // if (!acquired) preempt disable
if (local_trylock_irqsave(&lock, ...)) // if (!acquired) irq save

The existing local_lock_*() macros can be used either with
local_lock_t or local_trylock_t.
With local_trylock_t they set acquired = 1 while local_unlock_*() clears it.

In !PREEMPT_RT local_lock_irqsave(local_lock_t *) disables interrupts
to protect critical section, but it doesn't prevent NMI, so the fully
reentrant code cannot use local_lock_irqsave(local_lock_t *) for
exclusive access.

The local_lock_irqsave(local_trylock_t *) helper disables interrupts
and sets acquired=1, so local_trylock_irqsave(local_trylock_t *) from
NMI attempting to acquire the same lock will return false.

In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
Map local_trylock_irqsave() to preemptible spin_trylock().
When called in hard IRQ or NMI context, return false right away, since
spin_trylock() is not safe there due to the explicit locking in the
underlying rt_spin_trylock() implementation. Removing this explicit
locking and attempting only "trylock" is undesired due to PI implications.

The local_trylock() without _irqsave can be used to avoid the cost of
disabling/enabling interrupts by only disabling preemption, so
local_trylock() in an interrupt attempting to acquire the same
lock will return false.

Note there is no need to use local_inc for the acquired variable,
since it is a percpu variable with strict nesting scopes.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/local_lock.h
include/linux/local_lock_internal.h
mm/memcontrol.c

index 1a0bc35839e360d5c8170105849c3883463852f8..7ac9385cd475dee006d551398746b65ad7987614 100644 (file)
        __local_unlock_irqrestore(lock, flags)
 
 /**
- * localtry_lock_init - Runtime initialize a lock instance
- */
-#define localtry_lock_init(lock)               __localtry_lock_init(lock)
-
-/**
- * localtry_lock - Acquire a per CPU local lock
- * @lock:      The lock variable
- */
-#define localtry_lock(lock)            __localtry_lock(lock)
-
-/**
- * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
- * @lock:      The lock variable
- */
-#define localtry_lock_irq(lock)                __localtry_lock_irq(lock)
-
-/**
- * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
- *                      interrupts
- * @lock:      The lock variable
- * @flags:     Storage for interrupt flags
- */
-#define localtry_lock_irqsave(lock, flags)                             \
-       __localtry_lock_irqsave(lock, flags)
-
-/**
- * localtry_trylock - Try to acquire a per CPU local lock.
+ * local_trylock - Try to acquire a per CPU local lock
  * @lock:      The lock variable
  *
  * The function can be used in any context such as NMI or HARDIRQ. Due to
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define localtry_trylock(lock)         __localtry_trylock(lock)
+#define local_trylock(lock)            __local_trylock(lock)
 
 /**
- * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
- *                           interrupts if acquired
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ *                        interrupts if acquired
  * @lock:      The lock variable
  * @flags:     Storage for interrupt flags
  *
  * locking constrains it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define localtry_trylock_irqsave(lock, flags)                          \
-       __localtry_trylock_irqsave(lock, flags)
-
-/**
- * local_unlock - Release a per CPU local lock
- * @lock:      The lock variable
- */
-#define localtry_unlock(lock)          __localtry_unlock(lock)
-
-/**
- * local_unlock_irq - Release a per CPU local lock and enable interrupts
- * @lock:      The lock variable
- */
-#define localtry_unlock_irq(lock)              __localtry_unlock_irq(lock)
-
-/**
- * localtry_unlock_irqrestore - Release a per CPU local lock and restore
- *                           interrupt flags
- * @lock:      The lock variable
- * @flags:      Interrupt flags to restore
- */
-#define localtry_unlock_irqrestore(lock, flags)                        \
-       __localtry_unlock_irqrestore(lock, flags)
+#define local_trylock_irqsave(lock, flags)                     \
+       __local_trylock_irqsave(lock, flags)
 
 DEFINE_GUARD(local_lock, local_lock_t __percpu*,
             local_lock(_T),
index 67bd13d142fac39bc0f8b2c05eaf81717ff480f9..2389ae4f69a69d2eaf122b0bcc873c2f363f21d8 100644 (file)
@@ -15,10 +15,18 @@ typedef struct {
 #endif
 } local_lock_t;
 
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
 typedef struct {
-       local_lock_t    llock;
-       unsigned int    acquired;
-} localtry_lock_t;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+       struct task_struct      *owner;
+#endif
+       /*
+        * Same layout as local_lock_t with 'acquired' field at the end.
+        * (local_trylock_t *) will be cast to (local_lock_t *).
+        */
+       int acquired;
+} local_trylock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)               \
@@ -63,7 +71,6 @@ static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)      { LOCAL_LOCK_DEBUG_INIT(lockname) }
-#define INIT_LOCALTRY_LOCK(lockname)   { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
 
 #define __local_lock_init(lock)                                        \
 do {                                                           \
@@ -73,7 +80,7 @@ do {                                                          \
        lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,  \
                              0, LD_WAIT_CONFIG, LD_WAIT_INV,   \
                              LD_LOCK_PERCPU);                  \
-       local_lock_debug_init(lock);                            \
+       local_lock_debug_init((local_lock_t *)lock);            \
 } while (0)
 
 #define __spinlock_nested_bh_init(lock)                                \
@@ -87,149 +94,117 @@ do {                                                              \
        local_lock_debug_init(lock);                            \
 } while (0)
 
+#define __local_lock_acquire(lock)                                     \
+       do {                                                            \
+               local_trylock_t *tl;                                    \
+               local_lock_t *l;                                        \
+                                                                       \
+               l = (local_lock_t *)this_cpu_ptr(lock);                 \
+               tl = (local_trylock_t *)l;                              \
+               _Generic((lock),                                        \
+                       local_trylock_t *: ({                           \
+                               lockdep_assert(tl->acquired == 0);      \
+                               WRITE_ONCE(tl->acquired, 1);            \
+                       }),                                             \
+                       default:(void)0);                               \
+               local_lock_acquire(l);                                  \
+       } while (0)
+
 #define __local_lock(lock)                                     \
        do {                                                    \
                preempt_disable();                              \
-               local_lock_acquire(this_cpu_ptr(lock));         \
+               __local_lock_acquire(lock);                     \
        } while (0)
 
 #define __local_lock_irq(lock)                                 \
        do {                                                    \
                local_irq_disable();                            \
-               local_lock_acquire(this_cpu_ptr(lock));         \
+               __local_lock_acquire(lock);                     \
        } while (0)
 
 #define __local_lock_irqsave(lock, flags)                      \
        do {                                                    \
                local_irq_save(flags);                          \
-               local_lock_acquire(this_cpu_ptr(lock));         \
-       } while (0)
-
-#define __local_unlock(lock)                                   \
-       do {                                                    \
-               local_lock_release(this_cpu_ptr(lock));         \
-               preempt_enable();                               \
-       } while (0)
-
-#define __local_unlock_irq(lock)                               \
-       do {                                                    \
-               local_lock_release(this_cpu_ptr(lock));         \
-               local_irq_enable();                             \
+               __local_lock_acquire(lock);                     \
        } while (0)
 
-#define __local_unlock_irqrestore(lock, flags)                 \
-       do {                                                    \
-               local_lock_release(this_cpu_ptr(lock));         \
-               local_irq_restore(flags);                       \
-       } while (0)
-
-#define __local_lock_nested_bh(lock)                           \
-       do {                                                    \
-               lockdep_assert_in_softirq();                    \
-               local_lock_acquire(this_cpu_ptr(lock)); \
-       } while (0)
-
-#define __local_unlock_nested_bh(lock)                         \
-       local_lock_release(this_cpu_ptr(lock))
-
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock)                             \
-do {                                                           \
-       __local_lock_init(&(lock)->llock);                      \
-       WRITE_ONCE((lock)->acquired, 0);                        \
-} while (0)
-
-#define __localtry_lock(lock)                                  \
-       do {                                                    \
-               localtry_lock_t *lt;                            \
-               preempt_disable();                              \
-               lt = this_cpu_ptr(lock);                        \
-               local_lock_acquire(&lt->llock);                 \
-               WRITE_ONCE(lt->acquired, 1);                    \
-       } while (0)
-
-#define __localtry_lock_irq(lock)                              \
-       do {                                                    \
-               localtry_lock_t *lt;                            \
-               local_irq_disable();                            \
-               lt = this_cpu_ptr(lock);                        \
-               local_lock_acquire(&lt->llock);                 \
-               WRITE_ONCE(lt->acquired, 1);                    \
-       } while (0)
-
-#define __localtry_lock_irqsave(lock, flags)                   \
-       do {                                                    \
-               localtry_lock_t *lt;                            \
-               local_irq_save(flags);                          \
-               lt = this_cpu_ptr(lock);                        \
-               local_lock_acquire(&lt->llock);                 \
-               WRITE_ONCE(lt->acquired, 1);                    \
-       } while (0)
-
-#define __localtry_trylock(lock)                               \
+#define __local_trylock(lock)                                  \
        ({                                                      \
-               localtry_lock_t *lt;                            \
-               bool _ret;                                      \
+               local_trylock_t *tl;                            \
                                                                \
                preempt_disable();                              \
-               lt = this_cpu_ptr(lock);                        \
-               if (!READ_ONCE(lt->acquired)) {                 \
-                       WRITE_ONCE(lt->acquired, 1);            \
-                       local_trylock_acquire(&lt->llock);      \
-                       _ret = true;                            \
-               } else {                                        \
-                       _ret = false;                           \
+               tl = this_cpu_ptr(lock);                        \
+               if (READ_ONCE(tl->acquired)) {                  \
                        preempt_enable();                       \
+                       tl = NULL;                              \
+               } else {                                        \
+                       WRITE_ONCE(tl->acquired, 1);            \
+                       local_trylock_acquire(                  \
+                               (local_lock_t *)tl);            \
                }                                               \
-               _ret;                                           \
+               !!tl;                                           \
        })
 
-#define __localtry_trylock_irqsave(lock, flags)                        \
+#define __local_trylock_irqsave(lock, flags)                   \
        ({                                                      \
-               localtry_lock_t *lt;                            \
-               bool _ret;                                      \
+               local_trylock_t *tl;                            \
                                                                \
                local_irq_save(flags);                          \
-               lt = this_cpu_ptr(lock);                        \
-               if (!READ_ONCE(lt->acquired)) {                 \
-                       WRITE_ONCE(lt->acquired, 1);            \
-                       local_trylock_acquire(&lt->llock);      \
-                       _ret = true;                            \
-               } else {                                        \
-                       _ret = false;                           \
+               tl = this_cpu_ptr(lock);                        \
+               if (READ_ONCE(tl->acquired)) {                  \
                        local_irq_restore(flags);               \
+                       tl = NULL;                              \
+               } else {                                        \
+                       WRITE_ONCE(tl->acquired, 1);            \
+                       local_trylock_acquire(                  \
+                               (local_lock_t *)tl);            \
                }                                               \
-               _ret;                                           \
+               !!tl;                                           \
        })
 
-#define __localtry_unlock(lock)                                        \
+#define __local_lock_release(lock)                                     \
+       do {                                                            \
+               local_trylock_t *tl;                                    \
+               local_lock_t *l;                                        \
+                                                                       \
+               l = (local_lock_t *)this_cpu_ptr(lock);                 \
+               tl = (local_trylock_t *)l;                              \
+               _Generic((lock),                                        \
+                       local_trylock_t *: ({                           \
+                               lockdep_assert(tl->acquired == 1);      \
+                               WRITE_ONCE(tl->acquired, 0);            \
+                       }),                                             \
+                       default:(void)0);                               \
+               local_lock_release(l);                                  \
+       } while (0)
+
+#define __local_unlock(lock)                                   \
        do {                                                    \
-               localtry_lock_t *lt;                            \
-               lt = this_cpu_ptr(lock);                        \
-               WRITE_ONCE(lt->acquired, 0);                    \
-               local_lock_release(&lt->llock);                 \
+               __local_lock_release(lock);                     \
                preempt_enable();                               \
        } while (0)
 
-#define __localtry_unlock_irq(lock)                            \
+#define __local_unlock_irq(lock)                               \
        do {                                                    \
-               localtry_lock_t *lt;                            \
-               lt = this_cpu_ptr(lock);                        \
-               WRITE_ONCE(lt->acquired, 0);                    \
-               local_lock_release(&lt->llock);                 \
+               __local_lock_release(lock);                     \
                local_irq_enable();                             \
        } while (0)
 
-#define __localtry_unlock_irqrestore(lock, flags)              \
+#define __local_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
-               localtry_lock_t *lt;                            \
-               lt = this_cpu_ptr(lock);                        \
-               WRITE_ONCE(lt->acquired, 0);                    \
-               local_lock_release(&lt->llock);                 \
+               __local_lock_release(lock);                     \
                local_irq_restore(flags);                       \
        } while (0)
 
+#define __local_lock_nested_bh(lock)                           \
+       do {                                                    \
+               lockdep_assert_in_softirq();                    \
+               local_lock_acquire(this_cpu_ptr(lock)); \
+       } while (0)
+
+#define __local_unlock_nested_bh(lock)                         \
+       local_lock_release(this_cpu_ptr(lock))
+
 #else /* !CONFIG_PREEMPT_RT */
 
 /*
@@ -237,10 +212,9 @@ do {                                                               \
  * critical section while staying preemptible.
  */
 typedef spinlock_t local_lock_t;
-typedef spinlock_t localtry_lock_t;
+typedef spinlock_t local_trylock_t;
 
 #define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)
 
 #define __local_lock_init(l)                                   \
        do {                                                    \
@@ -283,17 +257,7 @@ do {                                                               \
        spin_unlock(this_cpu_ptr((lock)));                      \
 } while (0)
 
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock)                     __local_lock_init(lock)
-#define __localtry_lock(lock)                          __local_lock(lock)
-#define __localtry_lock_irq(lock)                      __local_lock(lock)
-#define __localtry_lock_irqsave(lock, flags)           __local_lock_irqsave(lock, flags)
-#define __localtry_unlock(lock)                                __local_unlock(lock)
-#define __localtry_unlock_irq(lock)                    __local_unlock(lock)
-#define __localtry_unlock_irqrestore(lock, flags)      __local_unlock_irqrestore(lock, flags)
-
-#define __localtry_trylock(lock)                               \
+#define __local_trylock(lock)                                  \
        ({                                                      \
                int __locked;                                   \
                                                                \
@@ -308,11 +272,11 @@ do {                                                              \
                __locked;                                       \
        })
 
-#define __localtry_trylock_irqsave(lock, flags)                        \
+#define __local_trylock_irqsave(lock, flags)                   \
        ({                                                      \
                typecheck(unsigned long, flags);                \
                flags = 0;                                      \
-               __localtry_trylock(lock);                       \
+               __local_trylock(lock);                          \
        })
 
 #endif /* CONFIG_PREEMPT_RT */
index 83c2df73e4b6484806fcd9a5a701b319b22b7ade..bca86961754e19c2e97f16f63fb8da8f59d5bd3c 100644 (file)
@@ -1739,7 +1739,7 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 }
 
 struct memcg_stock_pcp {
-       localtry_lock_t stock_lock;
+       local_trylock_t stock_lock;
        struct mem_cgroup *cached; /* this never be root cgroup */
        unsigned int nr_pages;
 
@@ -1754,7 +1754,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE 0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
-       .stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
+       .stock_lock = INIT_LOCAL_LOCK(stock_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -1785,11 +1785,10 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
        if (nr_pages > MEMCG_CHARGE_BATCH)
                return ret;
 
-       if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
-               if (!gfpflags_allow_spinning(gfp_mask))
-                       return ret;
-               localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
-       }
+       if (gfpflags_allow_spinning(gfp_mask))
+               local_lock_irqsave(&memcg_stock.stock_lock, flags);
+       else if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags))
+               return ret;
 
        stock = this_cpu_ptr(&memcg_stock);
        stock_pages = READ_ONCE(stock->nr_pages);
@@ -1798,7 +1797,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
                ret = true;
        }
 
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
        return ret;
 }
@@ -1837,14 +1836,14 @@ static void drain_local_stock(struct work_struct *dummy)
         * drain_stock races is that we always operate on local CPU stock
         * here with IRQ disabled
         */
-       localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+       local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
        stock = this_cpu_ptr(&memcg_stock);
        old = drain_obj_stock(stock);
        drain_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
        obj_cgroup_put(old);
 }
 
@@ -1874,7 +1873,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        unsigned long flags;
 
-       if (!localtry_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
+       if (!local_trylock_irqsave(&memcg_stock.stock_lock, flags)) {
                /*
                 * In case of unlikely failure to lock percpu stock_lock
                 * uncharge memcg directly.
@@ -1887,7 +1886,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
                return;
        }
        __refill_stock(memcg, nr_pages);
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 }
 
 /*
@@ -1944,9 +1943,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
        stock = &per_cpu(memcg_stock, cpu);
 
        /* drain_obj_stock requires stock_lock */
-       localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+       local_lock_irqsave(&memcg_stock.stock_lock, flags);
        old = drain_obj_stock(stock);
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
        drain_stock(stock);
        obj_cgroup_put(old);
@@ -2729,7 +2728,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
        unsigned long flags;
        int *bytes;
 
-       localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+       local_lock_irqsave(&memcg_stock.stock_lock, flags);
        stock = this_cpu_ptr(&memcg_stock);
 
        /*
@@ -2782,7 +2781,7 @@ static void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
        if (nr)
                __mod_objcg_mlstate(objcg, pgdat, idx, nr);
 
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
        obj_cgroup_put(old);
 }
 
@@ -2792,7 +2791,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
        unsigned long flags;
        bool ret = false;
 
-       localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+       local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
        stock = this_cpu_ptr(&memcg_stock);
        if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2800,7 +2799,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
                ret = true;
        }
 
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
 
        return ret;
 }
@@ -2892,7 +2891,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
        unsigned long flags;
        unsigned int nr_pages = 0;
 
-       localtry_lock_irqsave(&memcg_stock.stock_lock, flags);
+       local_lock_irqsave(&memcg_stock.stock_lock, flags);
 
        stock = this_cpu_ptr(&memcg_stock);
        if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -2910,7 +2909,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                stock->nr_bytes &= (PAGE_SIZE - 1);
        }
 
-       localtry_unlock_irqrestore(&memcg_stock.stock_lock, flags);
+       local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
        obj_cgroup_put(old);
 
        if (nr_pages)