                                goto end;
                }
                nc->frag.size = PAGE_SIZE << order;
-recycle:
-               atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+               /* Even if we own the page, we do not use atomic_set().
+                * This would break get_page_unless_zero() users.
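+                * The page comes from alloc_pages() with _count == 1, so
+                * adding NETDEV_PAGECNT_MAX_BIAS - 1 brings it to the full bias.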
+                */
+               atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+                          &nc->frag.page->_count);
                nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
                nc->frag.offset = 0;
        }
 
        if (nc->frag.offset + fragsz > nc->frag.size) {
-               /* avoid unnecessary locked operations if possible */
-               if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
-                   atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
-                       goto recycle;
-               goto refill;
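+               /* Outstanding fragments may still hold references: check
+                * whether our bias accounts for every remaining count.
+                */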
+               if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+                       if (!atomic_sub_and_test(nc->pagecnt_bias,
+                                                &nc->frag.page->_count))
+                               goto refill;
+                       /* OK, page count is 0, we can safely set it */
+                       atomic_set(&nc->frag.page->_count,
+                                  NETDEV_PAGECNT_MAX_BIAS);
+               } else {
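+                       /* Count matches our bias: we are the sole owner,
+                        * simply top it back up to the full bias.
+                        */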
+                       atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+                                  &nc->frag.page->_count);
+               }
+               nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+               nc->frag.offset = 0;
        }
 
        data = page_address(nc->frag.page) + nc->frag.offset;