* various states of availability.
  */
 LIST_HEAD(opp_tables);
+
+/* OPP tables with uninitialized required OPPs */
+LIST_HEAD(lazy_opp_tables);
+
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
 /* Flag indicating that opp_tables list is being updated at the moment */
                return 0;
        }
 
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(opp->opp_table))
+               return 0;
+
        return opp->required_opps[index]->pstate;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
        if (!required_opp_tables)
                return 0;
 
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(opp_table))
+               return -EBUSY;
+
        /* Single genpd case */
        if (!genpd_virt_devs)
                return _set_required_opp(dev, dev, opp, 0);
        mutex_init(&opp_table->lock);
        mutex_init(&opp_table->genpd_virt_dev_lock);
        INIT_LIST_HEAD(&opp_table->dev_list);
+       INIT_LIST_HEAD(&opp_table->lazy);
 
        /* Mark regulator count uninitialized */
        opp_table->regulator_count = -1;
        return 0;
 }
 
+/*
+ * _required_opps_available() - Disable an OPP whose required OPPs aren't all
+ * available.
+ * @opp: OPP whose required-opps entries are checked.
+ * @count: Number of entries in @opp->required_opps to examine.
+ *
+ * On the first unavailable required OPP, @opp itself is marked unavailable
+ * and a warning is printed. Uses pr_warn() rather than dev_warn() because
+ * no struct device is available to all callers (e.g. the lazy-link path).
+ */
+void _required_opps_available(struct dev_pm_opp *opp, int count)
+{
+       int i;
+
+       for (i = 0; i < count; i++) {
+               if (opp->required_opps[i]->available)
+                       continue;
+
+               /* A single unavailable required OPP disables this OPP */
+               opp->available = false;
+               pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n",
+                        __func__, opp->required_opps[i]->np, opp->rate);
+               return;
+       }
+}
+
 /*
  * Returns:
  * 0: On success. And appropriate error message for duplicate OPPs.
             struct opp_table *opp_table, bool rate_not_available)
 {
        struct list_head *head;
-       unsigned int i;
        int ret;
 
        mutex_lock(&opp_table->lock);
                         __func__, new_opp->rate);
        }
 
-       for (i = 0; i < opp_table->required_opp_count; i++) {
-               if (new_opp->required_opps[i]->available)
-                       continue;
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(opp_table))
+               return 0;
 
-               new_opp->available = false;
-               dev_warn(dev, "%s: OPP not supported by required OPP %pOF (%lu)\n",
-                        __func__, new_opp->required_opps[i]->np, new_opp->rate);
-               break;
-       }
+       _required_opps_available(new_opp, opp_table->required_opp_count);
 
        return 0;
 }
        if (!src_table || !src_table->required_opp_count)
                return pstate;
 
+       /* required-opps not fully initialized yet */
+       if (lazy_linking_pending(src_table))
+               return -EBUSY;
+
        for (i = 0; i < src_table->required_opp_count; i++) {
                if (src_table->required_opp_tables[i]->np == dst_table->np)
                        break;
 
 
        for (i = 0; i < opp_table->required_opp_count; i++) {
                if (IS_ERR_OR_NULL(required_opp_tables[i]))
-                       break;
+                       continue;
 
                dev_pm_opp_put_opp_table(required_opp_tables[i]);
        }
 
        opp_table->required_opp_count = 0;
        opp_table->required_opp_tables = NULL;
+       list_del(&opp_table->lazy);
 }
 
 /*
 {
        struct opp_table **required_opp_tables;
        struct device_node *required_np, *np;
+       bool lazy = false;
        int count, i;
 
        /* Traversing the first OPP node is all we need */
                required_opp_tables[i] = _find_table_of_opp_np(required_np);
                of_node_put(required_np);
 
-               if (IS_ERR(required_opp_tables[i]))
-                       goto free_required_tables;
+               if (IS_ERR(required_opp_tables[i])) {
+                       lazy = true;
+                       continue;
+               }
 
                /*
                 * We only support genpd's OPPs in the "required-opps" for now,
                }
        }
 
+       /* Let's do the linking later on */
+       if (lazy)
+               list_add(&opp_table->lazy, &lazy_opp_tables);
+
        goto put_np;
 
 free_required_tables:
 
        for (i = 0; i < opp_table->required_opp_count; i++) {
                if (!required_opps[i])
-                       break;
+                       continue;
 
                /* Put the reference back */
                dev_pm_opp_put(required_opps[i]);
        }
 
-       kfree(required_opps);
        opp->required_opps = NULL;
+       kfree(required_opps);
 }
 
 /* Populate all required OPPs which are part of "required-opps" list */
        for (i = 0; i < count; i++) {
                required_table = opp_table->required_opp_tables[i];
 
+               /* Required table not added yet, we will link later */
+               if (IS_ERR_OR_NULL(required_table))
+                       continue;
+
                np = of_parse_required_opp(opp->np, i);
                if (unlikely(!np)) {
                        ret = -ENODEV;
        return ret;
 }
 
+/*
+ * Link required OPPs for an individual OPP.
+ *
+ * For every OPP in @opp_table, resolve entry @index of its "required-opps"
+ * phandle list against the newly added @new_table and store the matching
+ * OPP in opp->required_opps[index].
+ *
+ * Returns 0 on success, -ENODEV if the phandle can't be parsed or no
+ * matching OPP exists in @new_table.
+ */
+static int lazy_link_required_opps(struct opp_table *opp_table,
+                                  struct opp_table *new_table, int index)
+{
+       struct device_node *required_np;
+       struct dev_pm_opp *opp;
+
+       list_for_each_entry(opp, &opp_table->opp_list, node) {
+               required_np = of_parse_required_opp(opp->np, index);
+               if (unlikely(!required_np))
+                       return -ENODEV;
+
+               opp->required_opps[index] = _find_opp_of_np(new_table, required_np);
+               of_node_put(required_np);
+
+               if (!opp->required_opps[index]) {
+                       pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+                              __func__, opp->np, index);
+                       /*
+                        * NOTE(review): OPPs earlier in the list keep their
+                        * freshly linked entries; the caller is expected to
+                        * mark the OPPs unusable on this error — confirm.
+                        */
+                       return -ENODEV;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Link required OPPs for all OPPs of the newly added OPP table.
+ *
+ * Walks the global lazy_opp_tables list (under opp_table_lock) and, for
+ * every table still waiting on unparsed required opp-tables, checks whether
+ * @new_table is one of them. If so, takes a reference on @new_table, stores
+ * it in required_opp_tables[] and links the individual OPPs. Tables whose
+ * required opp-tables are now all resolved are removed from the lazy list
+ * and their OPPs' availability is re-evaluated.
+ */
+static void lazy_link_required_opp_table(struct opp_table *new_table)
+{
+       struct opp_table *opp_table, *temp, **required_opp_tables;
+       struct device_node *required_np, *opp_np, *required_table_np;
+       struct dev_pm_opp *opp;
+       int i, ret;
+
+       /*
+        * We only support genpd's OPPs in the "required-opps" for now,
+        * as we don't know much about other cases.
+        */
+       if (!new_table->is_genpd)
+               return;
+
+       /* Protects both lazy_opp_tables and each table's required_opp_tables[] */
+       mutex_lock(&opp_table_lock);
+
+       list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
+               bool lazy = false;
+
+               /* opp_np can't be invalid here */
+               opp_np = of_get_next_available_child(opp_table->np, NULL);
+
+               for (i = 0; i < opp_table->required_opp_count; i++) {
+                       required_opp_tables = opp_table->required_opp_tables;
+
+                       /* Required opp-table is already parsed */
+                       if (!IS_ERR(required_opp_tables[i]))
+                               continue;
+
+                       /* required_np can't be invalid here */
+                       required_np = of_parse_required_opp(opp_np, i);
+                       required_table_np = of_get_parent(required_np);
+
+                       /*
+                        * Node references are dropped here; only the raw
+                        * pointer value of required_table_np is compared
+                        * against new_table->np below.
+                        */
+                       of_node_put(required_table_np);
+                       of_node_put(required_np);
+
+                       /*
+                        * Newly added table isn't the required opp-table for
+                        * opp_table.
+                        */
+                       if (required_table_np != new_table->np) {
+                               lazy = true;
+                               continue;
+                       }
+
+                       required_opp_tables[i] = new_table;
+                       _get_opp_table_kref(new_table);
+
+                       /* Link OPPs now */
+                       ret = lazy_link_required_opps(opp_table, new_table, i);
+                       if (ret) {
+                               /* The OPPs will be marked unusable */
+                               lazy = false;
+                               break;
+                       }
+               }
+
+               of_node_put(opp_np);
+
+               /* All required opp-tables found, remove from lazy list */
+               if (!lazy) {
+                       /* list_del + INIT makes lazy_linking_pending() false */
+                       list_del(&opp_table->lazy);
+                       INIT_LIST_HEAD(&opp_table->lazy);
+
+                       /*
+                        * NOTE(review): on the lazy_link_required_opps()
+                        * error path above, some opp->required_opps[] entries
+                        * may still be NULL when _required_opps_available()
+                        * dereferences them — verify this can't crash.
+                        */
+                       list_for_each_entry(opp, &opp_table->opp_list, node)
+                               _required_opps_available(opp, opp_table->required_opp_count);
+               }
+       }
+
+       mutex_unlock(&opp_table_lock);
+}
+
 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
 {
        struct device_node *np, *opp_np;
                }
        }
 
+       lazy_link_required_opp_table(opp_table);
+
        return 0;
 
 remove_static_opp:
 
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex opp_table_lock;
 
-extern struct list_head opp_tables;
+extern struct list_head opp_tables, lazy_opp_tables;
 
 /*
  * Internal data structure organization with the OPP layer library is as
  * meant for book keeping and private to OPP library.
  */
 struct opp_table {
-       struct list_head node;
+       struct list_head node, lazy;
 
        struct blocking_notifier_head head;
        struct list_head dev_list;
 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk);
 void _put_opp_list_kref(struct opp_table *opp_table);
+void _required_opps_available(struct dev_pm_opp *opp, int count);
+
+/*
+ * Returns true if @opp_table is still on the global lazy_opp_tables list,
+ * i.e. at least one of its required opp-tables hasn't been parsed/linked
+ * yet. Callers use this to bail out early instead of dereferencing
+ * not-yet-initialized required_opps entries.
+ */
+static inline bool lazy_linking_pending(struct opp_table *opp_table)
+{
+       return unlikely(!list_empty(&opp_table->lazy));
+}
 
 #ifdef CONFIG_OF
 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);