uint64_t        faults;
 };
 
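+/*
+ * Device numbers passed to hmm_open(): the two private device instances
+ * come first, followed by the two coherent ones (see
+ * hmm_is_coherent_type() below).
+ */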
+enum {
+       HMM_PRIVATE_DEVICE_ONE,
+       HMM_PRIVATE_DEVICE_TWO,
+       HMM_COHERENCE_DEVICE_ONE,
+       HMM_COHERENCE_DEVICE_TWO,
+};
+
 #define TWOMEG         (1 << 21)
 #define HMM_BUFFER_SIZE (1024 << 12)
 #define HMM_PATH_MAX    64
        unsigned int    page_shift;
 };
 
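+/*
+ * Run each single-device test against both device private and device
+ * coherent memory.
+ */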
+FIXTURE_VARIANT(hmm)
+{
+       int     device_number;
+};
+
+FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
+{
+       .device_number = HMM_PRIVATE_DEVICE_ONE,
+};
+
+FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
+{
+       .device_number = HMM_COHERENCE_DEVICE_ONE,
+};
+
 FIXTURE(hmm2)
 {
        int             fd0;
        unsigned int    page_shift;
 };
 
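+/*
+ * Likewise, run each two-device test against a pair of private devices
+ * and a pair of coherent devices.
+ */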
+FIXTURE_VARIANT(hmm2)
+{
+       int     device_number0;
+       int     device_number1;
+};
+
+FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
+{
+       .device_number0 = HMM_PRIVATE_DEVICE_ONE,
+       .device_number1 = HMM_PRIVATE_DEVICE_TWO,
+};
+
+FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
+{
+       .device_number0 = HMM_COHERENCE_DEVICE_ONE,
+       .device_number1 = HMM_COHERENCE_DEVICE_TWO,
+};
+
 static int hmm_open(int unit)
 {
        char pathname[HMM_PATH_MAX];
        return fd;
 }
 
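+/*
+ * Device numbers at or above HMM_COHERENCE_DEVICE_ONE denote coherent,
+ * CPU-accessible device memory.
+ */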
+static bool hmm_is_coherent_type(int dev_num)
+{
+       return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
+}
+
 FIXTURE_SETUP(hmm)
 {
        self->page_size = sysconf(_SC_PAGE_SIZE);
        self->page_shift = ffs(self->page_size) - 1;
 
-       self->fd = hmm_open(0);
+       self->fd = hmm_open(variant->device_number);
+       if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
+               SKIP(exit(0), "DEVICE_COHERENT not available");
        ASSERT_GE(self->fd, 0);
 }
 
        self->page_size = sysconf(_SC_PAGE_SIZE);
        self->page_shift = ffs(self->page_size) - 1;
 
-       self->fd0 = hmm_open(0);
+       self->fd0 = hmm_open(variant->device_number0);
+       if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
+               SKIP(exit(0), "DEVICE_COHERENT not available");
        ASSERT_GE(self->fd0, 0);
-       self->fd1 = hmm_open(1);
+       self->fd1 = hmm_open(variant->device_number1);
        ASSERT_GE(self->fd1, 0);
 }
 
        nanosleep(&t, NULL);
 }
 
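+/*
+ * Thin wrappers around HMM_DMIRROR_MIGRATE_TO_DEV/_TO_SYS that make the
+ * migration direction explicit at the call sites.
+ */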
+static int hmm_migrate_sys_to_dev(int fd,
+                                  struct hmm_buffer *buffer,
+                                  unsigned long npages)
+{
+       return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
+}
+
+static int hmm_migrate_dev_to_sys(int fd,
+                                  struct hmm_buffer *buffer,
+                                  unsigned long npages)
+{
+       return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
+}
+
 /*
  * Simple NULL test of device open/close.
  */
                ptr[i] = i;
 
        /* Migrate memory to device. */
-       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
        ASSERT_EQ(ret, 0);
        ASSERT_EQ(buffer->cpages, npages);
 
                ptr[i] = i;
 
        /* Migrate memory to device. */
-       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
        ASSERT_EQ(ret, 0);
        ASSERT_EQ(buffer->cpages, npages);
 
                ASSERT_EQ(ptr[i], i);
 
        /* Migrate memory to the device again. */
-       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
        ASSERT_EQ(ret, 0);
        ASSERT_EQ(buffer->cpages, npages);
 
        ASSERT_NE(buffer->ptr, MAP_FAILED);
 
        /* Migrate memory to device. */
-       ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
        ASSERT_EQ(ret, -ENOENT);
 
        hmm_buffer_free(buffer);
        p = buffer->ptr;
 
        /* Migrating a protected area should be an error. */
-       ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, npages);
+       ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
        ASSERT_EQ(ret, -EINVAL);
 
        /* Punch a hole after the first page address. */
        ASSERT_EQ(ret, 0);
 
        /* We expect an error if the vma doesn't cover the range. */
-       ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 3);
+       ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
        ASSERT_EQ(ret, -EINVAL);
 
        /* Page 2 will be a read-only zero page. */
 
        /* Now try to migrate pages 2-5 to device 1. */
        buffer->ptr = p + 2 * self->page_size;
-       ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 4);
+       ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
        ASSERT_EQ(ret, 0);
        ASSERT_EQ(buffer->cpages, 4);
 
        /* Page 5 won't be migrated to device 0 because it's on device 1. */
        buffer->ptr = p + 5 * self->page_size;
-       ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
+       ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
        ASSERT_EQ(ret, -ENOENT);
        buffer->ptr = p;
 
 }
 
 /*
- * Migrate anonymous memory to device private memory and fault it back to system
- * memory multiple times.
+ * Migrate anonymous memory to device memory and back to system memory
+ * multiple times. With the device private zone configuration, the pages
+ * are migrated back implicitly, by CPU page faults. With the device
+ * coherent zone configuration, the pages must be migrated back
+ * explicitly: coherent device memory is directly accessible by the CPU,
+ * so no page fault is generated.
  */
 TEST_F(hmm, migrate_multiple)
 {
                        ptr[i] = i;
 
                /* Migrate memory to device. */
-               ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_MIGRATE, buffer,
-                                     npages);
+               ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
                ASSERT_EQ(ret, 0);
                ASSERT_EQ(buffer->cpages, npages);
 
                for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
                        ASSERT_EQ(ptr[i], i);
 
-               /* Fault pages back to system memory and check them. */
+               /* Migrate back to system memory and check the pages. */
+               if (hmm_is_coherent_type(variant->device_number)) {
+                       ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
+                       ASSERT_EQ(ret, 0);
+                       ASSERT_EQ(buffer->cpages, npages);
+               }
+
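+               /*
+                * In the private case the CPU reads below fault the pages
+                * back; in the coherent case they were migrated back above.
+                */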
                for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
                        ASSERT_EQ(ptr[i], i);
 
 
        /* Page 5 will be migrated to device 0. */
        buffer->ptr = p + 5 * self->page_size;
-       ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_MIGRATE, buffer, 1);
+       ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
        ASSERT_EQ(ret, 0);
        ASSERT_EQ(buffer->cpages, 1);
 
        /* Page 6 will be migrated to device 1. */
        buffer->ptr = p + 6 * self->page_size;
-       ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_MIGRATE, buffer, 1);
+       ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
        ASSERT_EQ(ret, 0);
        ASSERT_EQ(buffer->cpages, 1);
 
        ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
        ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
        ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
-       ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
-                       HMM_DMIRROR_PROT_WRITE);
-       ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
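+       /*
+        * A page in the other device's private memory cannot be accessed
+        * (PROT_NONE), while a page in its coherent memory is still
+        * visible remotely.
+        */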
+       if (!hmm_is_coherent_type(variant->device_number0)) {
+               ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
+                               HMM_DMIRROR_PROT_WRITE);
+               ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
+       } else {
+               ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
+                               HMM_DMIRROR_PROT_WRITE);
+               ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
+                               HMM_DMIRROR_PROT_WRITE);
+       }
 
        hmm_buffer_free(buffer);
 }