};
                struct {        /* page_pool used by netstack */
                        /**
-                        * @dma_addr: might require a 64-bit value even on
+                        * @dma_addr: might require a 64-bit value on
                         * 32-bit architectures.
                         */
-                       dma_addr_t dma_addr;
+                       unsigned long dma_addr[2];
                };
                struct {        /* slab, slob and slub */
                        union {
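
Why the split: on 32-bit architectures that need 64-bit DMA addresses
(arm, mips, ppc with CONFIG_ARCH_DMA_ADDR_T_64BIT), an 8-byte dma_addr_t
forces 8-byte alignment on the containing union, which opens a 4-byte
hole after page->flags and grows struct page. Storing the address in
two adjacent unsigned longs keeps the union word-aligned. A minimal
userspace sketch of the effect, with stand-in struct names (not from
the patch); on an ILP32 ABI that 8-byte-aligns 64-bit integers it
prints "16 12":

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for a 64-bit dma_addr_t; arm/mips/ppc ABIs align
	 * 64-bit integers to 8 bytes, so force that here.
	 */
	typedef uint64_t __attribute__((aligned(8))) dma64_t;

	struct page_old {	/* dma_addr_t member */
		unsigned long flags;
		union {
			dma64_t dma_addr;	/* 8-byte union alignment */
		};
	};

	struct page_new {	/* unsigned long dma_addr[2] */
		unsigned long flags;
		union {
			unsigned long dma_addr[2];
		};
	};

	int main(void)
	{
		printf("%zu %zu\n", sizeof(struct page_old),
		       sizeof(struct page_new));
		return 0;
	}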
 
 
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-       return page->dma_addr;
+       dma_addr_t ret = page->dma_addr[0];
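+
+       /* Composing two 16-bit shifts instead of a single "<< 32"
+        * keeps builds with a 32-bit dma_addr_t warning-free: the
+        * branch is dead code there, but the compiler still flags a
+        * shift count equal to the type width.
+        */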
+       if (sizeof(dma_addr_t) > sizeof(unsigned long))
+               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
+       return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+       page->dma_addr[0] = addr;
+       if (sizeof(dma_addr_t) > sizeof(unsigned long))
+               page->dma_addr[1] = upper_32_bits(addr);
 }
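
Two properties of the helpers are worth spelling out. The low bits
always land in dma_addr[0], so on big-endian 32-bit the address can
never set bit 0 of the word that other struct page users overlay with
compound_head; a stray bit there would make a freed-and-recycled page
look like a tail page to a racing get_user_pages_fast(). And the pair
must round-trip any value a driver stores. A hypothetical sanity check
(not part of the patch) for the latter:

	/* Hypothetical self-test: set/get must round-trip the full
	 * dma_addr_t range, including the all-zeroes "unmapped" value.
	 */
	static void page_pool_dma_addr_selftest(struct page *page)
	{
		dma_addr_t addr = (dma_addr_t)0x1122334455667788ULL;

		page_pool_set_dma_addr(page, addr);
		WARN_ON(page_pool_get_dma_addr(page) != addr);

		page_pool_set_dma_addr(page, 0);
		WARN_ON(page_pool_get_dma_addr(page) != 0);
	}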
 
 static inline bool is_page_pool_compiled_in(void)
 
                                          struct page *page,
                                          unsigned int dma_sync_size)
 {
+       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
        dma_sync_size = min(dma_sync_size, pool->p.max_len);
-       dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
                                         pool->p.offset, dma_sync_size,
                                         pool->p.dma_dir);
 }
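
Callers influence dma_sync_size indirectly: on recycle, a driver can
bound the sync to the bytes the device actually wrote. A hedged sketch
of that driver-side call (pkt_len is illustrative):

	/* Recycle an RX page and sync only the received bytes for the
	 * device; passing -1 would sync the full pool->p.max_len.
	 */
	page_pool_put_page(pool, page, pkt_len, true);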
        if (dma_mapping_error(pool->p.dev, dma))
                return false;
 
-       page->dma_addr = dma;
+       page_pool_set_dma_addr(page, dma);
 
        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
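
For context, the map-and-sync paths above only run for pools created
with the corresponding flags. A hedged sketch of a driver-side
configuration that exercises both (field values are illustrative):

	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,		/* device doing the DMA */
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= XDP_PACKET_HEADROOM,
		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
	};
	struct page_pool *pool = page_pool_create(&pp);

	if (IS_ERR(pool))
		return PTR_ERR(pool);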
                 */
                goto skip_dma_unmap;
 
-       dma = page->dma_addr;
+       dma = page_pool_get_dma_addr(page);
 
-       /* When page is unmapped, it cannot be returned our pool */
+       /* When page is unmapped, it cannot be returned to our pool */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
-       page->dma_addr = 0;
+       page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.