        spin_unlock_irq(&gb_operations_lock);
 
        /* Store the operation id in the request header */
-       header = operation->request.buffer;
+       header = operation->request.header;
        header->operation_id = cpu_to_le16(operation->id);
 }
 
 
        message->cookie = connection->hd->driver->buffer_send(connection->hd,
                                        dest_cport_id,
-                                       message->buffer,
-                                       message->buffer_size,
+                                       message->header,
+                                       message->size,
                                        gfp_mask);
        if (IS_ERR(message->cookie)) {
                ret = PTR_ERR(message->cookie);
        struct gb_protocol *protocol = operation->connection->protocol;
        struct gb_operation_msg_hdr *header;
 
-       header = operation->request.buffer;
+       header = operation->request.header;
 
        /*
         * If the protocol has no incoming request handler, report
        bool incoming_request;
 
        operation = container_of(recv_work, struct gb_operation, recv_work);
-       incoming_request = operation->response.buffer == NULL;
+       incoming_request = operation->response.header == NULL;
        if (incoming_request)
                gb_operation_request_handle(operation);
        gb_operation_complete(operation);
                type |= GB_OPERATION_TYPE_RESPONSE;
        }
 
-       message->buffer = gb_buffer_alloc(hd, size, gfp_flags);
-       if (!message->buffer)
+       message->header = gb_buffer_alloc(hd, size, gfp_flags);
+       if (!message->header)
                return -ENOMEM;
-       message->buffer_size = size;
+       message->size = size;
 
        /* Fill in the header structure */
-       header = message->buffer;
+       header = message->header;
        header->size = cpu_to_le16(size);
        header->operation_id = 0;       /* Filled in when submitted */
        header->type = type;
        struct greybus_host_device *hd;
 
        hd = message->operation->connection->hd;
-       gb_buffer_free(hd, message->buffer);
+       gb_buffer_free(hd, message->header);
 
        message->operation = NULL;
        message->payload = NULL;
-       message->buffer = NULL;
-       message->buffer_size = 0;
+       message->header = NULL;
+       message->size = 0;
 }
 
 /*
                return;         /* XXX Respond with pre-allocated ENOMEM */
        }
        operation->id = operation_id;
-       memcpy(operation->request.buffer, data, size);
+       memcpy(operation->request.header, data, size);
 
        /* The rest will be handled in work queue context */
        queue_work(gb_operation_recv_workqueue, &operation->recv_work);
        gb_pending_operation_remove(operation);
 
        message = &operation->response;
-       if (size <= message->buffer_size) {
+       if (size <= message->size) {
                /* Transfer the operation result from the response header */
-               header = message->buffer;
+               header = message->header;
                operation->result = header->result;
        } else {
                gb_connection_err(connection, "recv buffer too small");
 
        /* We must ignore the payload if a bad status is returned */
        if (operation->result == GB_OP_SUCCESS)
-               memcpy(message->buffer, data, size);
+               memcpy(message->header, data, size);
 
        /* The rest will be handled in work queue context */
        queue_work(gb_operation_recv_workqueue, &operation->recv_work);
 {
        operation->canceled = true;
        gb_message_cancel(&operation->request);
-       if (operation->response.buffer)
+       if (operation->response.header)
                gb_message_cancel(&operation->response);
 }
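
For context, the rename above implies the message descriptor ends up looking roughly like the sketch below. The field names (operation, cookie, header, payload, size) and the header fields used in the hunks (size, operation_id, type, result) come from the diff itself; the exact types and ordering shown here are illustrative assumptions, not copied from the real greybus headers.

#include <linux/types.h>

/*
 * Sketch only: assumed shape of the structures touched by this rename.
 * Field names come from the diff; types and layout are illustrative.
 */
struct gb_operation_msg_hdr {
	__le16	size;		/* header + payload size (see header->size above) */
	__le16	operation_id;	/* 0 until the request is submitted */
	__u8	type;		/* request type; response bit OR'd in for replies */
	__u8	result;		/* read back from responses (header->result) */
	/* ... */
};

struct gb_message {
	struct gb_operation		*operation;	/* owning operation */
	void				*cookie;	/* returned by buffer_send() */
	struct gb_operation_msg_hdr	*header;	/* was ->buffer before this rename */
	void				*payload;	/* data following the header */
	size_t				size;		/* was ->buffer_size */
};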