/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2021 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
 #define SVGA_MAGIC         0x900000UL
 #define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
 
+/* Version 3 has the control bar instead of the FIFO */
+#define SVGA_VERSION_3     3
+#define SVGA_ID_3          SVGA_MAKE_ID(SVGA_VERSION_3)
+
 /* Version 2 let the address of the frame buffer be unsigned on Win32 */
 #define SVGA_VERSION_2     2
 #define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
  * Interrupts are only supported when the
  * SVGA_CAP_IRQMASK capability is present.
  */
-#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
-#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
-#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
-#define SVGA_IRQFLAG_COMMAND_BUFFER       0x8    /* Command buffer completed */
-#define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
+#define SVGA_IRQFLAG_ANY_FENCE            (1 << 0) /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS        (1 << 1) /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL           (1 << 2) /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER       (1 << 3) /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR                (1 << 4) /* Error while processing commands */
+#define SVGA_IRQFLAG_MAX                  (1 << 5)
 
 /*
  * The byte-size is the size of the actual cursor data,
     */
    SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,
 
-   SVGA_REG_TOP = 77,               /* Must be 1 more than the last register */
+   /*
+    * These registers are for the addresses of the memory BARs for SVGA3
+    */
+   SVGA_REG_REGS_START_HIGH32 = 77,
+   SVGA_REG_REGS_START_LOW32 = 78,
+   SVGA_REG_FB_START_HIGH32 = 79,
+   SVGA_REG_FB_START_LOW32 = 80,
+
+   /*
+    * A hint register that recommends which quality level the guest should
+    * currently use to define multisample surfaces.
+    *
+    * If the register is SVGA_REG_MSHINT_DISABLED,
+    * the guest is only allowed to use SVGA3D_MS_QUALITY_FULL.
+    *
+    * Otherwise, this is a live value that can change while the VM is
+    * powered on with the hint suggestion for which quality level the guest
+    * should be using.  Guests are free to ignore the hint and use either
+    * RESOLVE or FULL quality.
+    */
+   SVGA_REG_MSHINT = 81,
+
+   SVGA_REG_IRQ_STATUS = 82,
+   SVGA_REG_DIRTY_TRACKING = 83,
+
+   SVGA_REG_TOP = 84,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
    SVGA_REG_GUEST_DRIVER_ID_SUBMIT  = MAX_UINT32,
 } SVGARegGuestDriverId;
 
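+/*
+ * Values for SVGA_REG_MSHINT.  DISABLED restricts the guest to
+ * SVGA3D_MS_QUALITY_FULL; FULL and RESOLVED are live hints the guest
+ * may follow or ignore.
+ */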
+typedef enum SVGARegMSHint {
+   SVGA_REG_MSHINT_DISABLED = 0,
+   SVGA_REG_MSHINT_FULL     = 1,
+   SVGA_REG_MSHINT_RESOLVED = 2,
+} SVGARegMSHint;
+
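+/*
+ * Dirty-tracking granularity written to SVGA_REG_DIRTY_TRACKING:
+ * either per image or per whole surface.
+ */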
+typedef enum SVGARegDirtyTracking {
+   SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0,
+   SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1,
+} SVGARegDirtyTracking;
+
 
 /*
  * Guest memory regions (GMRs):
 
 }
 
 /**
- * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
  *
  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  * @shader_slot: The shader slot of the binding.
 }
 
 /**
- * vmw_binding_emit_set_rt - Issue delayed DX rendertarget binding commands
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
  *
  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  */
 }
 
 /**
- * vmw_binding_emit_set_vb - Issue delayed vertex buffer binding commands
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
  *
  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
  *
 }
 
 /**
- * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
  *
  * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
  *
 
 }
 
 /**
- * ttm_bo_cpu_blit - in-kernel cpu blit.
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
  *
  * @dst: Destination buffer object.
  * @dst_offset: Destination offset of blit start in bytes.
 
 
 
 /**
- * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
  * for vmw user buffer objects
  *
  * @base: Pointer to the TTM base object
 
 
 #include "vmwgfx_drv.h"
 
-struct vmw_temp_set_context {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdDXTempSetContext body;
-};
-
 bool vmw_supports_3d(struct vmw_private *dev_priv)
 {
        uint32_t fifo_min, hwversion;
-       const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+       const struct vmw_fifo_state *fifo = dev_priv->fifo;
 
        if (!(dev_priv->capabilities & SVGA_CAP_3D))
                return false;
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
 
+       BUG_ON(vmw_is_svga_v3(dev_priv));
+
        fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
        return false;
 }
 
-int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
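+/**
+ * vmw_fifo_create - Allocate and initialize the device FIFO state
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns NULL if the device has no FIFO memory, a pointer to the new
+ * FIFO state on success, or an ERR_PTR on allocation failure.
+ */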
+struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
 {
+       struct vmw_fifo_state *fifo;
        uint32_t max;
        uint32_t min;
 
-       fifo->dx = false;
+       if (!dev_priv->fifo_mem)
+               return NULL;
+
+       fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
+       if (!fifo)
+               return ERR_PTR(-ENOMEM);
+
        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
-       if (unlikely(fifo->static_buffer == NULL))
-               return -ENOMEM;
+       if (unlikely(fifo->static_buffer == NULL)) {
+               kfree(fifo);
+               return ERR_PTR(-ENOMEM);
+       }
 
        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
 
        mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);
-
-       DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
-       DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
-       DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
-
-       dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
-       dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
-       dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-
-       vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
-                 SVGA_REG_ENABLE_HIDE);
-
-       vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-
        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);
-
-       atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
-       vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
-
-       return 0;
+       return fifo;
 }
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
        u32 *fifo_mem = dev_priv->fifo_mem;
 
-       if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
+       if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
 }
 
-void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
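+/**
+ * vmw_fifo_destroy - Free the FIFO state created by vmw_fifo_create()
+ *
+ * @dev_priv: Pointer to device private structure.
+ */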
+void vmw_fifo_destroy(struct vmw_private *dev_priv)
 {
-       vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
-       while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
-               ;
-
-       dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+       struct vmw_fifo_state *fifo = dev_priv->fifo;
 
-       vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
-                 dev_priv->config_done_state);
-       vmw_write(dev_priv, SVGA_REG_ENABLE,
-                 dev_priv->enable_state);
-       vmw_write(dev_priv, SVGA_REG_TRACES,
-                 dev_priv->traces_state);
+       if (!fifo)
+               return;
 
        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
+       kfree(fifo);
+       dev_priv->fifo = NULL;
 }
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                    uint32_t bytes)
 {
-       struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+       struct vmw_fifo_state *fifo_state = dev_priv->fifo;
        u32  *fifo_mem = dev_priv->fifo_mem;
        uint32_t max;
        uint32_t min;
 
 static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
-       struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+       struct vmw_fifo_state *fifo_state = dev_priv->fifo;
        uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
        uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
        uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
-       if (fifo_state->dx)
-               bytes += sizeof(struct vmw_temp_set_context);
-
-       fifo_state->dx = false;
        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);
 
 
 
 /**
- * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
  *
  * @dev_priv: Pointer to device private structure.
  * @bytes: Number of bytes to commit.
 }
 
 /**
- * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * vmw_cmd_flush - Flush any buffered commands and make sure command processing
  * starts.
  *
  * @dev_priv: Pointer to device private structure.
 
 int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
-       struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct svga_fifo_cmd_fence *cmd_fence;
        u32 *fm;
        int ret = 0;
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);
 
-       if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+       if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
 
                /*
                 * Don't request hardware to send a fence. The
        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
        cmd_fence->fence = *seqno;
        vmw_cmd_commit_flush(dev_priv, bytes);
-       vmw_update_seqno(dev_priv, fifo_state);
+       vmw_update_seqno(dev_priv);
 
 out_err:
        return ret;
 }
 
 /**
- * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
  * legacy query commands.
  *
  * @dev_priv: The device private structure.
  * @cid: The hardware context id used for the query.
  *
- * See the vmw_fifo_emit_dummy_query documentation.
+ * See the vmw_cmd_emit_dummy_query documentation.
  */
-static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
-                                            uint32_t cid)
+static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+                                           uint32_t cid)
 {
        /*
 }
 
 /**
- * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
  * guest-backed resource query commands.
  *
  * @dev_priv: The device private structure.
  * @cid: The hardware context id used for the query.
  *
- * See the vmw_fifo_emit_dummy_query documentation.
+ * See the vmw_cmd_emit_dummy_query documentation.
  */
-static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
-                                       uint32_t cid)
+static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
+                                      uint32_t cid)
 {
        /*
         * A query wait without a preceding query end will
 
 
 /**
- * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
  * appropriate resource query commands.
  *
  * @dev_priv: The device private structure.
                              uint32_t cid)
 {
        if (dev_priv->has_mob)
-               return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+               return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
 
-       return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+       return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
+}
+
+
+/**
+ * vmw_cmd_supported - returns true if the given device supports
+ * command queues.
+ *
+ * @vmw: The device private structure.
+ *
+ * Returns true if we can issue commands.
+ */
+bool vmw_cmd_supported(struct vmw_private *vmw)
+{
+       if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+                                 SVGA_CAP_CMD_BUFFERS_2)) != 0)
+               return true;
+       /*
+        * We have FIFO commands
+        */
+       return vmw->fifo_mem != NULL;
 }
 
 
 
 /**
- * vmw_cmbuf_header_submit: Submit a command buffer to hardware.
+ * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
  *
  * @header: The header of the buffer to submit.
  */
 }
 
 /**
- * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle.
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
  *
  * @man: The command buffer manager.
  * @check_preempted: Check also the preempted queue for pending command buffers.
 
 }
 
 /**
- * vmw_cotable_add_view - add a view to the cotable's list of active views.
+ * vmw_cotable_add_resource - add a view to the cotable's list of active views.
  *
  * @res: pointer struct vmw_resource representing the cotable.
  * @head: pointer to the struct list_head member of the resource, dedicated
 
 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
 
 
-/**
+/*
  * Fully encoded drm commands. Might move to vmw_drm.h
  */
 
 
 static const struct pci_device_id vmw_pci_id_list[] = {
        { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+       { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
        { }
 };
 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
        return ret;
 }
 
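+/**
+ * vmw_device_init - Enable the device and set up the command submission path
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Saves the register state present at load time, enables the device,
+ * enables framebuffer traces when no command queues are available, and
+ * creates the FIFO state if FIFO memory is present.
+ */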
+static int vmw_device_init(struct vmw_private *dev_priv)
+{
+       bool uses_fb_traces = false;
+
+       DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+       DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+       DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+       dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+       dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+       dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
+
+       vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+                 SVGA_REG_ENABLE_HIDE);
+
+       uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
+                        (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;
+
+       vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
+       dev_priv->fifo = vmw_fifo_create(dev_priv);
+       if (IS_ERR(dev_priv->fifo)) {
+               int err = PTR_ERR(dev_priv->fifo);
+               dev_priv->fifo = NULL;
+               return err;
+       } else if (!dev_priv->fifo) {
+               vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+       }
+
+       dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
+       atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+       return 0;
+}
+
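+/**
+ * vmw_device_fini - Sync the device and restore the pre-load register state
+ *
+ * @vmw: Pointer to device private structure.
+ */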
+static void vmw_device_fini(struct vmw_private *vmw)
+{
+       /*
+        * Legacy sync
+        */
+       vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+       while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
+               ;
+
+       vmw->last_read_seqno = vmw_fence_read(vmw);
+
+       vmw_write(vmw, SVGA_REG_CONFIG_DONE,
+                 vmw->config_done_state);
+       vmw_write(vmw, SVGA_REG_ENABLE,
+                 vmw->enable_state);
+       vmw_write(vmw, SVGA_REG_TRACES,
+                 vmw->traces_state);
+
+       vmw_fifo_destroy(vmw);
+}
+
 /**
  * vmw_request_device_late - Perform late device setup
  *
 {
        int ret;
 
-       ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+       ret = vmw_device_init(dev_priv);
        if (unlikely(ret != 0)) {
-               DRM_ERROR("Unable to initialize FIFO.\n");
+               DRM_ERROR("Unable to initialize the device.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
                vmw_cmdbuf_man_destroy(dev_priv->cman);
 out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
-       vmw_fifo_release(dev_priv, &dev_priv->fifo);
+       vmw_device_fini(dev_priv);
        return ret;
 }
 
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);
 
-       vmw_fifo_release(dev_priv, &dev_priv->fifo);
+       vmw_device_fini(dev_priv);
 }
 
 /*
 static int vmw_setup_pci_resources(struct vmw_private *dev,
                                   unsigned long pci_id)
 {
+       resource_size_t rmmio_start;
+       resource_size_t rmmio_size;
        resource_size_t fifo_start;
        resource_size_t fifo_size;
        int ret;
        if (ret)
                return ret;
 
-       dev->io_start = pci_resource_start(pdev, 0);
-       dev->vram_start = pci_resource_start(pdev, 1);
-       dev->vram_size = pci_resource_len(pdev, 1);
-       fifo_start = pci_resource_start(pdev, 2);
-       fifo_size = pci_resource_len(pdev, 2);
-
-       DRM_INFO("FIFO at %pa size is %llu kiB\n",
-                &fifo_start, (uint64_t)fifo_size / 1024);
-       dev->fifo_mem = devm_memremap(dev->drm.dev,
-                                     fifo_start,
-                                     fifo_size,
-                                     MEMREMAP_WB);
-
-       if (IS_ERR(dev->fifo_mem)) {
-               DRM_ERROR("Failed mapping FIFO memory.\n");
+       dev->pci_id = pci_id;
+       if (pci_id == VMWGFX_PCI_ID_SVGA3) {
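+               /* SVGA v3: BAR 0 is the register MMIO, BAR 2 is the VRAM */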
+               rmmio_start = pci_resource_start(pdev, 0);
+               rmmio_size = pci_resource_len(pdev, 0);
+               dev->vram_start = pci_resource_start(pdev, 2);
+               dev->vram_size = pci_resource_len(pdev, 2);
+
+               DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n",
+                        &rmmio_start, (uint64_t)rmmio_size / 1024);
+               dev->rmmio = devm_ioremap(dev->drm.dev,
+                                         rmmio_start,
+                                         rmmio_size);
+               if (IS_ERR(dev->rmmio)) {
+                       DRM_ERROR("Failed mapping registers mmio memory.\n");
+                       pci_release_regions(pdev);
+                       return PTR_ERR(dev->rmmio);
+               }
+       } else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
+               dev->io_start = pci_resource_start(pdev, 0);
+               dev->vram_start = pci_resource_start(pdev, 1);
+               dev->vram_size = pci_resource_len(pdev, 1);
+               fifo_start = pci_resource_start(pdev, 2);
+               fifo_size = pci_resource_len(pdev, 2);
+
+               DRM_INFO("FIFO at %pa size is %llu kiB\n",
+                        &fifo_start, (uint64_t)fifo_size / 1024);
+               dev->fifo_mem = devm_memremap(dev->drm.dev,
+                                             fifo_start,
+                                             fifo_size,
+                                             MEMREMAP_WB);
+
+               if (IS_ERR(dev->fifo_mem)) {
+                       DRM_ERROR("Failed mapping FIFO memory.\n");
+                       pci_release_regions(pdev);
+                       return PTR_ERR(dev->fifo_mem);
+               }
+       } else {
                pci_release_regions(pdev);
-               return PTR_ERR(dev->fifo_mem);
+               return -EINVAL;
        }
 
        /*
 {
        uint32_t svga_id;
 
-       vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+       vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
+                         SVGA_ID_3 : SVGA_ID_2);
        svga_id = vmw_read(dev, SVGA_REG_ID);
-       if (svga_id != SVGA_ID_2) {
+       if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
                DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
                          svga_id, dev->vmw_chipset);
                return -ENOSYS;
        }
+       BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
+       DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff));
        return 0;
 }
 
        struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
 
        dev_priv->vmw_chipset = pci_id;
-       dev_priv->last_read_seqno = (uint32_t) -100;
        dev_priv->drm.dev_private = dev_priv;
 
        mutex_init(&dev_priv->cmdbuf_mutex);
        vmw_print_capabilities(dev_priv->capabilities);
        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
                vmw_print_capabilities2(dev_priv->capabilities2);
+       DRM_INFO("Supports command queues = %d\n",
+                vmw_cmd_supported((dev_priv)));
 
        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;
 
-       vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
-       (void) vmw_read(dev_priv, SVGA_REG_ID);
+       vmw_detect_version(dev_priv);
 
        if (dev_priv->enable_fb)
                vmw_fifo_resource_inc(dev_priv);
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
-       .poll = vmw_fops_poll,
-       .read = vmw_fops_read,
+       .poll = drm_poll,
+       .read = drm_read,
 #if defined(CONFIG_COMPAT)
        .compat_ioctl = vmw_compat_ioctl,
 #endif
 
 #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
 #define VMWGFX_PCI_ID_SVGA2              0x0405
+#define VMWGFX_PCI_ID_SVGA3              0x0406
 
 /*
  * Perhaps we should have sysfs entries for these.
        uint32_t capabilities;
        struct mutex fifo_mutex;
        struct rw_semaphore rwsem;
-       bool dx;
 };
 
 /**
        struct drm_device drm;
        struct ttm_device bdev;
 
-       struct vmw_fifo_state fifo;
-
        struct drm_vma_offset_manager vma_manager;
+       unsigned long pci_id;
        u32 vmw_chipset;
        resource_size_t io_start;
        resource_size_t vram_start;
        resource_size_t vram_size;
        resource_size_t prim_bb_mem;
+       void __iomem *rmmio;
        u32 *fifo_mem;
        resource_size_t fifo_mem_size;
        uint32_t fb_max_width;
         */
        struct vmw_otable_batch otable_batch;
 
+       struct vmw_fifo_state *fifo;
        struct vmw_cmdbuf_man *cman;
        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
 
        return (struct vmw_fpriv *)file_priv->driver_priv;
 }
 
+/*
+ * SVGA v3 has MMIO register access and lacks FIFO commands
+ */
+static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
+{
+       return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
+}
+
 /*
  * The locking here is fine-grained, so that it is performed once
  * for every read- and write operation. This is of course costly, but we
 static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
 {
-       spin_lock(&dev_priv->hw_lock);
-       outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
-       outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
-       spin_unlock(&dev_priv->hw_lock);
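+       /*
+        * SVGA v3 exposes registers through MMIO; older devices use the
+        * index/value port pair below, which needs the hw_lock to keep
+        * the two port accesses atomic.
+        */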
+       if (vmw_is_svga_v3(dev_priv)) {
+               iowrite32(value, dev_priv->rmmio + offset);
+       } else {
+               spin_lock(&dev_priv->hw_lock);
+               outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+               outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
+               spin_unlock(&dev_priv->hw_lock);
+       }
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 {
        u32 val;
 
-       spin_lock(&dev_priv->hw_lock);
-       outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
-       val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
-       spin_unlock(&dev_priv->hw_lock);
+       if (vmw_is_svga_v3(dev_priv)) {
+               val = ioread32(dev_priv->rmmio + offset);
+       } else {
+               spin_lock(&dev_priv->hw_lock);
+               outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+               val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
+               spin_unlock(&dev_priv->hw_lock);
+       }
 
        return val;
 }
                             struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
-extern __poll_t vmw_fops_poll(struct file *filp,
-                                 struct poll_table_struct *wait);
-extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
-                            size_t count, loff_t *offset);
 
 /**
  * Fifo utilities - vmwgfx_fifo.c
  */
 
-extern int vmw_fifo_init(struct vmw_private *dev_priv,
-                        struct vmw_fifo_state *fifo);
-extern void vmw_fifo_release(struct vmw_private *dev_priv,
-                            struct vmw_fifo_state *fifo);
+extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
+extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
+extern bool vmw_cmd_supported(struct vmw_private *vmw);
 extern void *
 vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
 #define VMW_CMD_RESERVE(__priv, __bytes)                                     \
        VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
 
+
+/**
+ * vmw_fifo_caps - Returns the capabilities of the FIFO command
+ * queue or 0 if fifo memory isn't present.
+ * @dev_priv: The device private context
+ */
+static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
+{
+       if (!dev_priv->fifo_mem || !dev_priv->fifo)
+               return 0;
+       return dev_priv->fifo->capabilities;
+}
+
+
+/**
+ * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
+ * is enabled in the FIFO.
+ * @dev_priv: The device private context
+ */
+static inline bool
+vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
+{
+       return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
+}
+
 /**
  * TTM glue - vmwgfx_ttm_glue.c
  */
  * IRQs and wating - vmwgfx_irq.c
  */
 
-extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-                         uint32_t seqno, bool interruptible,
-                         unsigned long timeout);
 extern int vmw_irq_install(struct drm_device *dev, int irq);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
                             uint32_t seqno,
                             bool interruptible,
                             unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv,
-                               struct vmw_fifo_state *fifo_state);
+extern void vmw_update_seqno(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
  */
 static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
 {
+       BUG_ON(vmw_is_svga_v3(vmw));
        return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
 }
 
 static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
                                      u32 value)
 {
+       BUG_ON(vmw_is_svga_v3(vmw));
        WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
 }
+
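+/*
+ * On SVGA v3 the last processed fence seqno lives in the SVGA_REG_FENCE
+ * register; older devices keep it in FIFO memory.
+ */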
+static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
+{
+       u32 fence;
+       if (vmw_is_svga_v3(dev_priv))
+               fence = vmw_read(dev_priv, SVGA_REG_FENCE);
+       else
+               fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+       return fence;
+}
+
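+/*
+ * The fence seqno can only be written through FIFO memory; SVGA v3
+ * devices have no FIFO and must never take this path.
+ */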
+static inline void vmw_fence_write(struct vmw_private *dev_priv,
+                                 u32 fence)
+{
+       BUG_ON(vmw_is_svga_v3(dev_priv));
+       vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
+}
+
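+/*
+ * IRQ status lives in SVGA_REG_IRQ_STATUS on SVGA v3 and in the
+ * SVGA_IRQSTATUS_PORT I/O port on older devices.
+ */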
+static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
+{
+       u32 status;
+       if (vmw_is_svga_v3(vmw))
+               status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
+       else
+               status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
+       return status;
+}
+
+static inline void vmw_irq_status_write(struct vmw_private *vmw,
+                                       uint32 status)
+{
+       if (vmw_is_svga_v3(vmw))
+               vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
+       else
+               outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
+}
+
 #endif
 
 }
 
 /**
- * vmw_rebind_dx_query - Rebind DX query associated with the context
+ * vmw_rebind_all_dx_query - Rebind DX query associated with the context
  *
  * @ctx_res: context the query belongs to
  *
 }
 
 /**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
  * to a MOB id.
  *
  * @dev_priv: Pointer to a device private structure.
 }
 
 /**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
  * to a valid SVGAGuestPtr
  *
  * @dev_priv: Pointer to a device private structure.
 }
 
 /**
- * vmw_cmd_dx_ia_set_vertex_buffers - Validate
+ * vmw_cmd_dx_set_index_buffer - Validate
  * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
  *
  * @dev_priv: Pointer to a device private struct.
 }
 
 /**
- * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
  * command
  *
  * @dev_priv: Pointer to a device private struct.
 }
 
 /**
- * vmw_cmd_dx_clear_rendertarget_view - Validate
+ * vmw_cmd_dx_clear_depthstencil_view - Validate
  * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
  *
  * @dev_priv: Pointer to a device private struct.
 
                fence_rep.handle = fence_handle;
                fence_rep.seqno = fence->base.seqno;
-               vmw_update_seqno(dev_priv, &dev_priv->fifo);
+               vmw_update_seqno(dev_priv);
                fence_rep.passed_seqno = dev_priv->last_read_seqno;
        }
 
 
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;
 
-       u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+       u32 seqno = vmw_fence_read(dev_priv);
+
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;
 
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-
        return true;
 }
 
        if (likely(vmw_fence_obj_signaled(fence)))
                return timeout;
 
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);
 
        spin_lock(f->lock);
        bool needs_rerun;
        uint32_t seqno, new_seqno;
 
-       seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
+       seqno = vmw_fence_read(fman->dev_priv);
 rerun:
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
 
        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
        if (unlikely(needs_rerun)) {
-               new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
+               new_seqno = vmw_fence_read(fman->dev_priv);
                if (new_seqno != seqno) {
                        seqno = new_seqno;
                        goto rerun;
                return ret;
 }
 
-void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
-{
-       struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
-
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-}
-
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
        dma_fence_free(&fence->base);
 }
 
 /**
- * vmw_event_fence_action_create - Post an event for sending when a fence
+ * vmw_event_fence_action_queue - Post an event for sending when a fence
  * object seqno has passed.
  *
  * @file_priv: The file connection on which the event should be posted.
 
                              bool lazy,
                              bool interruptible, unsigned long timeout);
 
-extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
-
 extern int vmw_fence_create(struct vmw_fence_manager *fman,
                            uint32_t seqno,
                            struct vmw_fence_obj **p_fence);
 
                param->value = dev_priv->capabilities2;
                break;
        case DRM_VMW_PARAM_FIFO_CAPS:
-               param->value = dev_priv->fifo.capabilities;
+               param->value = vmw_fifo_caps(dev_priv);
                break;
        case DRM_VMW_PARAM_MAX_FB_SIZE:
                param->value = dev_priv->prim_bb_mem;
                break;
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
-               const struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
                if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
                        param->value = SVGA3D_HWVERSION_WS8_B1;
                        break;
 
                param->value =
                        vmw_fifo_mem_read(dev_priv,
-                                         ((fifo->capabilities &
+                                         ((vmw_fifo_caps(dev_priv) &
                                            SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                                   SVGA_FIFO_3D_HWVERSION_REVISED :
                                                   SVGA_FIFO_3D_HWVERSION));
 out_clips:
        return ret;
 }
-
-
-/**
- * vmw_fops_poll - wrapper around the drm_poll function
- *
- * @filp: See the linux fops poll documentation.
- * @wait: See the linux fops poll documentation.
- *
- * Wrapper around the drm_poll function that makes sure the device is
- * processing the fifo if drm_poll decides to wait.
- */
-__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
-{
-       struct drm_file *file_priv = filp->private_data;
-       struct vmw_private *dev_priv =
-               vmw_priv(file_priv->minor->dev);
-
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-       return drm_poll(filp, wait);
-}
-
-
-/**
- * vmw_fops_read - wrapper around the drm_read function
- *
- * @filp: See the linux fops read documentation.
- * @buffer: See the linux fops read documentation.
- * @count: See the linux fops read documentation.
- * @offset: See the linux fops read documentation.
- *
- * Wrapper around the drm_read function that makes sure the device is
- * processing the fifo if drm_read decides to wait.
- */
-ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
-                     size_t count, loff_t *offset)
-{
-       struct drm_file *file_priv = filp->private_data;
-       struct vmw_private *dev_priv =
-               vmw_priv(file_priv->minor->dev);
-
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-       return drm_read(filp, buffer, count, offset);
-}
 
 }
 
 /**
- * vmw_irq_handler irq handler
+ * vmw_irq_handler - irq handler
  *
  * @irq: irq number
  * @arg: Closure argument. Pointer to a struct drm_device cast to void *
        uint32_t status, masked_status;
        irqreturn_t ret = IRQ_HANDLED;
 
-       status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+       status = vmw_irq_status_read(dev_priv);
        masked_status = status & READ_ONCE(dev_priv->irq_mask);
 
        if (likely(status))
-               outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+               vmw_irq_status_write(dev_priv, status);
 
        if (!status)
                return IRQ_NONE;
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
-void vmw_update_seqno(struct vmw_private *dev_priv,
-                        struct vmw_fifo_state *fifo_state)
+void vmw_update_seqno(struct vmw_private *dev_priv)
 {
-       uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+       uint32_t seqno = vmw_fence_read(dev_priv);
 
        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
 bool vmw_seqno_passed(struct vmw_private *dev_priv,
                         uint32_t seqno)
 {
-       struct vmw_fifo_state *fifo_state;
        bool ret;
 
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;
 
-       fifo_state = &dev_priv->fifo;
-       vmw_update_seqno(dev_priv, fifo_state);
+       vmw_update_seqno(dev_priv);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;
 
-       if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+       if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;
 
                      bool interruptible,
                      unsigned long timeout)
 {
-       struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+       struct vmw_fifo_state *fifo_state = dev_priv->fifo;
 
        uint32_t count = 0;
        uint32_t signal_seq;
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle)
-               vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
+               vmw_fence_write(dev_priv, signal_seq);
 
        wake_up_all(&dev_priv->fence_queue);
 out_err:
 {
        spin_lock_bh(&dev_priv->waiter_lock);
        if ((*waiter_count)++ == 0) {
-               outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+               vmw_irq_status_write(dev_priv, flag);
                dev_priv->irq_mask |= flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
                                  &dev_priv->goal_queue_waiters);
 }
 
-int vmw_wait_seqno(struct vmw_private *dev_priv,
-                     bool lazy, uint32_t seqno,
-                     bool interruptible, unsigned long timeout)
-{
-       long ret;
-       struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
-       if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
-               return 0;
-
-       if (likely(vmw_seqno_passed(dev_priv, seqno)))
-               return 0;
-
-       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-
-       if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
-               return vmw_fallback_wait(dev_priv, lazy, true, seqno,
-                                        interruptible, timeout);
-
-       if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
-               return vmw_fallback_wait(dev_priv, lazy, false, seqno,
-                                        interruptible, timeout);
-
-       vmw_seqno_waiter_add(dev_priv);
-
-       if (interruptible)
-               ret = wait_event_interruptible_timeout
-                   (dev_priv->fence_queue,
-                    vmw_seqno_passed(dev_priv, seqno),
-                    timeout);
-       else
-               ret = wait_event_timeout
-                   (dev_priv->fence_queue,
-                    vmw_seqno_passed(dev_priv, seqno),
-                    timeout);
-
-       vmw_seqno_waiter_remove(dev_priv);
-
-       if (unlikely(ret == 0))
-               ret = -EBUSY;
-       else if (likely(ret > 0))
-               ret = 0;
-
-       return ret;
-}
-
 static void vmw_irq_preinstall(struct drm_device *dev)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;
 
-       status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-       outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+       status = vmw_irq_status_read(dev_priv);
+       vmw_irq_status_write(dev_priv, status);
 }
 
 void vmw_irq_uninstall(struct drm_device *dev)
 
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
 
-       status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-       outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+       status = vmw_irq_status_read(dev_priv);
+       vmw_irq_status_write(dev_priv, status);
 
        dev->irq_enabled = false;
        free_irq(dev->irq, dev);
 
 
 void vmw_du_cleanup(struct vmw_display_unit *du)
 {
+       struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
+
        drm_plane_cleanup(&du->primary);
-       drm_plane_cleanup(&du->cursor);
+       if (vmw_cmd_supported(dev_priv))
+               drm_plane_cleanup(&du->cursor);
 
        drm_connector_unregister(&du->connector);
        drm_crtc_cleanup(&du->crtc);
        uint32_t count;
 
        spin_lock(&dev_priv->cursor_lock);
-       vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
-       vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
-       vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
-       count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
-       vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
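+       /*
+        * Use cursor bypass 3 when the FIFO supports it; otherwise fall
+        * back to the legacy SVGA_REG_CURSOR_* registers.
+        */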
+       if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+               vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+               vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+               vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+               count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+               vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+       } else {
+               vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+               vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+               vmw_write(dev_priv, SVGA_REG_CURSOR_ON, show ? 1 : 0);
+       }
        spin_unlock(&dev_priv->cursor_lock);
 }
 
 
 
 /**
- * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface
+ * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
  *
  * @vps: plane state associated with the display surface
  * @unreference: true if we also want to unreference the display.
  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
  *
  * @plane: cursor plane
- * @new_state: info on the new plane state
+ * @state: info on the new plane state
  *
  * This is a chance to fail if the new cursor state does not fit
  * our requirements.
 {
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
 
-       if (dev_priv->active_display_unit == vmw_du_legacy)
+       if (dev_priv->active_display_unit == vmw_du_legacy &&
+           vmw_cmd_supported(dev_priv))
                return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
                                                color, clips, num_clips);
 
 }
 
 /**
- * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
  * property.
  *
  * @dev_priv: Pointer to a device private struct.
 
 
        drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
 
-       /* Initialize cursor plane */
-       ret = drm_universal_plane_init(dev, &ldu->base.cursor,
-                       0, &vmw_ldu_cursor_funcs,
-                       vmw_cursor_plane_formats,
-                       ARRAY_SIZE(vmw_cursor_plane_formats),
-                       NULL, DRM_PLANE_TYPE_CURSOR, NULL);
-       if (ret) {
-               DRM_ERROR("Failed to initialize cursor plane");
-               drm_plane_cleanup(&ldu->base.primary);
-               goto err_free;
-       }
+       /*
+        * Without command queues the driver falls back to traces and
+        * software cursors, so only create a hardware cursor plane when
+        * commands are supported.
+        */
+       if (vmw_cmd_supported(dev_priv)) {
+               /* Initialize cursor plane */
+               ret = drm_universal_plane_init(dev, &ldu->base.cursor,
+                                              0, &vmw_ldu_cursor_funcs,
+                                              vmw_cursor_plane_formats,
+                                              ARRAY_SIZE(vmw_cursor_plane_formats),
+                                              NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+               if (ret) {
+                       DRM_ERROR("Failed to initialize cursor plane");
+                       drm_plane_cleanup(&ldu->base.primary);
+                       goto err_free;
+               }
 
-       drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+               drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+       }
 
        ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
                                 DRM_MODE_CONNECTOR_VIRTUAL);
                goto err_free_encoder;
        }
 
-       ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
-                                       &ldu->base.cursor,
-                                       &vmw_legacy_crtc_funcs, NULL);
+       ret = drm_crtc_init_with_planes(
+                     dev, crtc, &ldu->base.primary,
+                     vmw_cmd_supported(dev_priv) ? &ldu->base.cursor : NULL,
+                     &vmw_legacy_crtc_funcs, NULL);
        if (ret) {
                DRM_ERROR("Failed to initialize CRTC\n");
                goto err_free_unregister;
 
 static bool vmw_overlay_available(const struct vmw_private *dev_priv)
 {
        return (dev_priv->overlay_priv != NULL &&
-               ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+               ((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) ==
                 VMW_OVERLAY_CAP_MASK));
 }
 
 
 
 #include <linux/types.h>
 
-#define VMWGFX_INDEX_PORT     0x0
-#define VMWGFX_VALUE_PORT     0x1
-#define VMWGFX_IRQSTATUS_PORT 0x8
-
 struct svga_guest_mem_descriptor {
        u32 ppn;
        u32 num_pages;
 
 }
 
 /**
- * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
  * TTM user-space handle and perform basic type checks
  *
  * @dev_priv:     Pointer to a device private struct
 }
 
 /**
- * vmw_resource_update_dirty - Update a resource's dirty tracker with a
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
  * sequential range of touched backing store memory.
  * @res: The resource.
  * @start: The first page touched.
 
 }
 
 /**
- * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
  *
  * @dev_priv: Pointer to the device private structure.
  * @framebuffer: Pointer to the buffer-object backed framebuffer.
 
 };
 
 /**
- * struct vmw_view - view define command body stub
+ * struct vmw_view_define - view define command body stub
  *
  * @view_id: The device id of the view being defined
  * @sid: The surface id of the view being defined
 
 }
 
 /**
- * vmw_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
  *
  * @dirty: The closure structure.
  *
 }
 
 /**
- * vmw_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
  * copy command.
  *
  * @dirty: The closure structure.
 /**
  * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
  * @plane: display plane
- * @old_state: Only used to get crtc info
+ * @state: Only used to get crtc info
  *
  * Formally update stdu->display_srf to the new plane, and bind the new
  * plane STDU.  This function is called during the commit phase when
 
 }
 
 /**
- * vmw_user_surface_free - User visible surface TTM base object destructor
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
  *
  * @p_base:         Pointer to a pointer to a TTM base object
  *                  embedded in a struct vmw_user_surface.
 }
 
 /**
- * vmw_user_surface_destroy_ioctl - Ioctl function implementing
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
  *                                  the user surface destroy functionality.
  *
  * @dev:            Pointer to a struct drm_device.
 }
 
 /**
- * vmw_user_surface_define_ioctl - Ioctl function implementing
+ * vmw_surface_define_ioctl - Ioctl function implementing
  *                                  the user surface define functionality.
  *
  * @dev:            Pointer to a struct drm_device.
 }
 
 /**
- * vmw_user_surface_define_ioctl - Ioctl function implementing
+ * vmw_surface_reference_ioctl - Ioctl function implementing
  *                                  the user surface reference functionality.
  *
  * @dev:            Pointer to a struct drm_device.
 }
 
 /**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * vmw_gb_surface_create - Encode a surface_define command.
  *
  * @res:        Pointer to a struct vmw_resource embedded in a struct
  *              vmw_surface.
 
 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
 
 /**
- * Helper functions to advance a struct vmw_piter iterator.
+ * __vmw_piter_non_sg_next: Helper functions to advance
+ * a struct vmw_piter iterator.
  *
  * @viter: Pointer to the iterator.
  *
 
 
 /**
- * Helper functions to return a pointer to the current page.
+ * __vmw_piter_non_sg_page: Helper functions to return a pointer
+ * to the current page.
  *
  * @viter: Pointer to the iterator
  *
 }
 
 /**
- * Helper functions to return the DMA address of the current page.
+ * __vmw_piter_phys_addr: Helper functions to return the DMA
+ * address of the current page.
  *
  * @viter: Pointer to the iterator
  *
 
 }
 
 /**
- * vmw_validation_cone - Commit validation actions after command submission
+ * vmw_validation_done - Commit validation actions after command submission
  * success.
  * @ctx: The validation context.
  * @fence: Fence with which to fence all buffer objects taking part in the