 static DEFINE_SPINLOCK(clock_lock);
 static DEFINE_MUTEX(clock_list_sem);
 
+/* clock disable operations are not passed on to hardware during boot */
+static int allow_disable;
+
 void clk_rate_table_build(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          int nr_freqs,
                return;
 
        if (!(--clk->usecount)) {
-               if (likely(clk->ops && clk->ops->disable))
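+               /* hardware disable is deferred until clk_late_init() runs */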
+               if (likely(allow_disable && clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        return err;
 }
 late_initcall(clk_debugfs_init);
+
+static int __init clk_late_init(void)
+{
+       unsigned long flags;
+       struct clk *clk;
+
+       /* disable all clocks with zero use count */
+       mutex_lock(&clock_list_sem);
+       spin_lock_irqsave(&clock_lock, flags);
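+       /* with both locks held, the clock list and use counts are stable */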
+
+       list_for_each_entry(clk, &clock_list, node)
+               if (!clk->usecount && clk->ops && clk->ops->disable)
+                       clk->ops->disable(clk);
+
+       /* from now on allow clock disable operations */
+       allow_disable = 1;
+
+       spin_unlock_irqrestore(&clock_lock, flags);
+       mutex_unlock(&clock_list_sem);
+       return 0;
+}
+late_initcall(clk_late_init);
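
A minimal consumer-side sketch of the path this patch gates, assuming the
standard clk API (clk_get()/clk_enable()/clk_disable()); the driver and the
"uart0" clock name are hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *uart_clk;

static int example_probe(struct device *dev)
{
        int ret;

        uart_clk = clk_get(dev, "uart0");       /* hypothetical clock name */
        if (IS_ERR(uart_clk))
                return PTR_ERR(uart_clk);

        ret = clk_enable(uart_clk);     /* usecount 0 -> 1, ops->enable() runs */
        if (ret)
                clk_put(uart_clk);
        return ret;
}

static void example_remove(void)
{
        /* usecount 1 -> 0: ops->disable() is only reached once allow_disable
         * has been set by clk_late_init() at late_initcall time */
        clk_disable(uart_clk);
        clk_put(uart_clk);
}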