u8         vhca_resource_manager[0x1];
 
        u8         hca_cap_2[0x1];
-       u8         reserved_at_21[0x2];
+       u8         reserved_at_21[0x1];
+       u8         dtor[0x1];
        u8         event_on_vhca_state_teardown_request[0x1];
        u8         event_on_vhca_state_in_use[0x1];
        u8         event_on_vhca_state_active[0x1];
        u8         reserved_at_0[0xe0];
 };
 
+/* One 32-bit default-timeout entry as laid out by firmware: a 3-bit
+ * multiplier selecting the time unit and a 20-bit timeout value.
+ * NOTE(review): effective timeout is presumably to_value scaled by the
+ * unit encoded in to_multiplier — confirm against the device PRM.
+ */
+struct mlx5_ifc_default_timeout_bits {
+       u8         to_multiplier[0x3];
+       u8         reserved_at_3[0x9];
+       u8         to_value[0x14];
+};
+
+/* DTOR (Default Timeouts Register) layout: a table of per-operation
+ * firmware timeout entries, each a mlx5_ifc_default_timeout_bits
+ * (0x20 bits). Total size is 0x200 bits (64 bytes); the reserved_at_*
+ * names give the field's bit offset, per this file's convention.
+ */
+struct mlx5_ifc_dtor_reg_bits {
+       u8         reserved_at_0[0x20];
+
+       struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+       u8         reserved_at_40[0x60];
+
+       struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+       struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+       struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+       struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+       struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+       struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+       struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+       struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+       struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+       u8         reserved_at_1c0[0x40];
+};
+
 enum {
        MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN                 = 0x1,
        MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR  = 0x2,