pipe: kill ->map() and ->unmap() — all pipe_buffer_operations use the same instances of those methods (generic_pipe_buf_map/unmap), so remove the method pointers and open-code kmap()/kmap_atomic() on buf->page at the call sites.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
                if (len + offset > PAGE_SIZE)
                        len = PAGE_SIZE - offset;
 
-               src = buf->ops->map(pipe, buf, 1);
+               src = kmap_atomic(buf->page);
                memcpy(page_address(page) + offset, src + buf->offset, len);
-               buf->ops->unmap(pipe, buf, src);
+               kunmap_atomic(src);
 
                sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
        }
 
                struct pipe_buffer *buf = cs->currbuf;
 
                if (!cs->write) {
-                       buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+                       kunmap_atomic(cs->mapaddr);
                } else {
                        kunmap_atomic(cs->mapaddr);
                        buf->len = PAGE_SIZE - cs->len;
 
                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
-                       cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+                       cs->mapaddr = kmap_atomic(buf->page);
                        cs->len = buf->len;
                        cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
 out_fallback_unlock:
        unlock_page(newpage);
 out_fallback:
-       cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+       cs->mapaddr = kmap_atomic(buf->page);
        cs->buf = cs->mapaddr + buf->offset;
 
        err = lock_request(cs->fc, cs->req);
 
                page_cache_release(page);
 }
 
-/**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe:      the pipe that the buffer belongs to
- * @buf:       the buffer that should be mapped
- * @atomic:    whether to use an atomic map
- *
- * Description:
- *     This function returns a kernel virtual address mapping for the
- *     pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- *     and the caller has to be careful not to fault before calling
- *     the unmap function.
- *
- *     Note that this function calls kmap_atomic() if @atomic != 0.
- */
-void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
-                          struct pipe_buffer *buf, int atomic)
-{
-       if (atomic) {
-               buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-               return kmap_atomic(buf->page);
-       }
-
-       return kmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_map);
-
-/**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe:      the pipe that the buffer belongs to
- * @buf:       the buffer that should be unmapped
- * @map_data:  the data that the mapping function returned
- *
- * Description:
- *     This function undoes the mapping that ->map() provided.
- */
-void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
-                           struct pipe_buffer *buf, void *map_data)
-{
-       if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
-               buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-               kunmap_atomic(map_data);
-       } else
-               kunmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_unmap);
-
 /**
  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
  * @pipe:      the pipe that the buffer belongs to
 
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
 
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
 
                        atomic = !iov_fault_in_pages_write(iov, chars);
 redo:
-                       addr = ops->map(pipe, buf, atomic);
+                       if (atomic)
+                               addr = kmap_atomic(buf->page);
+                       else
+                               addr = kmap(buf->page);
                        error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-                       ops->unmap(pipe, buf, addr);
+                       if (atomic)
+                               kunmap_atomic(addr);
+                       else
+                               kunmap(buf->page);
                        if (unlikely(error)) {
                                /*
                                 * Just retry with the slow path if we failed.
 
                        iov_fault_in_pages_read(iov, chars);
 redo1:
-                       addr = ops->map(pipe, buf, atomic);
+                       if (atomic)
+                               addr = kmap_atomic(buf->page);
+                       else
+                               addr = kmap(buf->page);
                        error = pipe_iov_copy_from_user(offset + addr, iov,
                                                        chars, atomic);
-                       ops->unmap(pipe, buf, addr);
+                       if (atomic)
+                               kunmap_atomic(addr);
+                       else
+                               kunmap(buf->page);
                        ret = error;
                        do_wakeup = 1;
                        if (error) {
 
 
 const struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = page_cache_pipe_buf_confirm,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
 
 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = page_cache_pipe_buf_release,
        .steal = user_page_pipe_buf_steal,
 
 static const struct pipe_buf_operations default_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
 /* Pipe buffer operations for a socket and similar. */
 const struct pipe_buf_operations nosteal_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = generic_pipe_buf_nosteal,
                goto out;
 
        if (buf->page != page) {
-               char *src = buf->ops->map(pipe, buf, 1);
+               char *src = kmap_atomic(buf->page);
                char *dst = kmap_atomic(page);
 
                memcpy(dst + offset, src + buf->offset, this_len);
                flush_dcache_page(page);
                kunmap_atomic(dst);
-               buf->ops->unmap(pipe, buf, src);
+               kunmap_atomic(src);
        }
        ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
                                page, fsdata);
        void *data;
        loff_t tmp = sd->pos;
 
-       data = buf->ops->map(pipe, buf, 0);
+       data = kmap(buf->page);
        ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
-       buf->ops->unmap(pipe, buf, data);
+       kunmap(buf->page);
 
        return ret;
 }
         * pages and doing an atomic copy
         */
        if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
-               src = buf->ops->map(pipe, buf, 1);
+               src = kmap_atomic(buf->page);
                ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
                                                        sd->len);
-               buf->ops->unmap(pipe, buf, src);
+               kunmap_atomic(src);
                if (!ret) {
                        ret = sd->len;
                        goto out;
        /*
         * No dice, use slow non-atomic map and copy
         */
-       src = buf->ops->map(pipe, buf, 0);
+       src = kmap(buf->page);
 
        ret = sd->len;
        if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
                ret = -EFAULT;
 
-       buf->ops->unmap(pipe, buf, src);
+       kunmap(buf->page);
 out:
        if (ret > 0)
                sd->u.userptr += ret;
 
         */
        int can_merge;
 
-       /*
-        * ->map() returns a virtual address mapping of the pipe buffer.
-        * The last integer flag reflects whether this should be an atomic
-        * mapping or not. The atomic map is faster, however you can't take
-        * page faults before calling ->unmap() again. So if you need to eg
-        * access user data through copy_to/from_user(), then you must get
-        * a non-atomic map. ->map() uses the kmap_atomic slot for
-        * atomic maps, you have to be careful if mapping another page as
-        * source or destination for a copy.
-        */
-       void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
-
-       /*
-        * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
-        */
-       void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
-
        /*
         * ->confirm() verifies that the data in the pipe buffer is there
         * and that the contents are good. If the pages in the pipe belong
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 
 
 static const struct pipe_buf_operations relay_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = relay_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
 
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
        .can_merge              = 0,
-       .map                    = generic_pipe_buf_map,
-       .unmap                  = generic_pipe_buf_unmap,
        .confirm                = generic_pipe_buf_confirm,
        .release                = generic_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .can_merge              = 0,
-       .map                    = generic_pipe_buf_map,
-       .unmap                  = generic_pipe_buf_unmap,
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,