return ret;
 }
 
+/**
+ * vgic_its_save_device_tables - Save the device table and all ITT
+ * into guest RAM
+ *
+ * @its: the ITS whose device tables are dumped
+ *
+ * Return: -ENXIO for now; the actual table walk/dump is not implemented
+ * yet, this is a placeholder wired into the v0 ABI save path.
+ */
+static int vgic_its_save_device_tables(struct vgic_its *its)
+{
+       return -ENXIO;
+}
+
+/**
+ * vgic_its_restore_device_tables - Restore the device table and all ITT
+ * from guest RAM to internal data structs
+ *
+ * @its: the ITS being restored
+ *
+ * Return: -ENXIO for now; restore is not implemented yet, this is a
+ * placeholder wired into the v0 ABI restore path.
+ */
+static int vgic_its_restore_device_tables(struct vgic_its *its)
+{
+       return -ENXIO;
+}
+
+/**
+ * vgic_its_save_collection_table - Save the collection table into
+ * guest RAM
+ *
+ * @its: the ITS whose collection table is dumped
+ *
+ * Return: -ENXIO for now; not implemented yet (placeholder).
+ */
+static int vgic_its_save_collection_table(struct vgic_its *its)
+{
+       return -ENXIO;
+}
+
+/**
+ * vgic_its_restore_collection_table - reads the collection table
+ * in guest memory and restores the ITS internal state. Requires the
+ * BASER registers to be restored before.
+ *
+ * @its: the ITS being restored
+ *
+ * Return: -ENXIO for now; not implemented yet (placeholder).
+ */
+static int vgic_its_restore_collection_table(struct vgic_its *its)
+{
+       return -ENXIO;
+}
+
 /**
- * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM
+ * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
  * according to v0 ABI
  */
 static int vgic_its_save_tables_v0(struct vgic_its *its)
 {
-       return -ENXIO;
+       struct kvm *kvm = its->dev->kvm;
+       int ret;
+
+       /* Lock order: kvm->lock, then its->its_lock, then all VCPUs */
+       mutex_lock(&kvm->lock);
+       mutex_lock(&its->its_lock);
+
+       if (!lock_all_vcpus(kvm)) {
+               mutex_unlock(&its->its_lock);
+               mutex_unlock(&kvm->lock);
+               return -EBUSY;
+       }
+
+       ret = vgic_its_save_device_tables(its);
+       if (ret)
+               goto out;
+
+       ret = vgic_its_save_collection_table(its);
+
+out:
+       /* Unlock in strict reverse order of acquisition */
+       unlock_all_vcpus(kvm);
+       mutex_unlock(&its->its_lock);
+       mutex_unlock(&kvm->lock);
+       return ret;
 }
 
 /**
+ * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
+ * to internal data structs according to v0 ABI
  */
 static int vgic_its_restore_tables_v0(struct vgic_its *its)
 {
-       return -ENXIO;
+       struct kvm *kvm = its->dev->kvm;
+       int ret;
+
+       /* Lock order: kvm->lock, then its->its_lock, then all VCPUs */
+       mutex_lock(&kvm->lock);
+       mutex_lock(&its->its_lock);
+
+       if (!lock_all_vcpus(kvm)) {
+               mutex_unlock(&its->its_lock);
+               mutex_unlock(&kvm->lock);
+               return -EBUSY;
+       }
+
+       /*
+        * NOTE(review): collection table is restored before the device
+        * tables — presumably because ITT entries reference collections;
+        * confirm once the restore helpers are implemented.
+        */
+       ret = vgic_its_restore_collection_table(its);
+       if (ret)
+               goto out;
+
+       ret = vgic_its_restore_device_tables(its);
+
+out:
+       unlock_all_vcpus(kvm);
+       mutex_unlock(&its->its_lock);
+       mutex_unlock(&kvm->lock);
+
+       if (ret)
+               return ret;
+
+       /*
+        * On restore path, MSI injections can happen before the
+        * first VCPU run so let's complete the GIC init here.
+        */
+       return kvm_vgic_map_resources(its->dev->kvm);
 }
 
 static int vgic_its_commit_v0(struct vgic_its *its)
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
+               case KVM_DEV_ARM_ITS_SAVE_TABLES:
+                       return 0;
+               case KVM_DEV_ARM_ITS_RESTORE_TABLES:
+                       return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
 
                return 0;
        }
-       case KVM_DEV_ARM_VGIC_GRP_CTRL:
+       case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+               const struct vgic_its_abi *abi = vgic_its_get_abi(its);
+
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        its->initialized = true;

                        return 0;
+               case KVM_DEV_ARM_ITS_SAVE_TABLES:
+                       return abi->save_tables(its);
+               case KVM_DEV_ARM_ITS_RESTORE_TABLES:
+                       return abi->restore_tables(its);
                }
+               /*
+                * Keep the break: an unhandled attr must not fall
+                * through into the GRP_ITS_REGS case below.
+                */
                break;
+       }
        case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;