--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ ... @@ static void ib_cache_update(struct ib_device *device, u8 port)
        }
 
        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
+       device->cache.port_state_cache[port - rdma_start_port(device)] =
+               tprops->state;
 
        write_unlock_irq(&device->cache.lock);
@@ ... @@ int ib_cache_setup_one(struct ib_device *device)
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
-       if (!device->cache.pkey_cache ||
+       device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
+                                         (rdma_end_port(device) -
+                                          rdma_start_port(device) + 1),
+                                         GFP_KERNEL);
+       if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
            !device->cache.lmc_cache) {
                err = -ENOMEM;
                goto free;
@@ ... @@ int ib_cache_setup_one(struct ib_device *device)
 free:
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
+       kfree(device->cache.port_state_cache);
        return err;
 }
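
Because kfree(NULL) is defined as a no-op, the shared free: label above can release all three per-port caches unconditionally, even when only a subset of the allocations succeeded. A minimal sketch of that error-path shape, assuming illustrative names (example_cache, example_setup) that are not part of this patch:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct example_cache {
        u8                 *lmc;
        enum ib_port_state *port_state;
};

static int example_setup(struct example_cache *c, int nports)
{
        int err;

        /* Same allocation idiom as the hunk above: one entry per port. */
        c->lmc = kmalloc(sizeof(*c->lmc) * nports, GFP_KERNEL);
        c->port_state = kmalloc(sizeof(*c->port_state) * nports, GFP_KERNEL);
        if (!c->lmc || !c->port_state) {
                err = -ENOMEM;
                goto free;
        }
        return 0;

free:
        kfree(c->lmc);          /* kfree(NULL) is a no-op, so both calls */
        kfree(c->port_state);   /* are safe after a partial failure.     */
        return err;
}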
@@ ... @@ void ib_cache_release_one(struct ib_device *device)
        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
+       kfree(device->cache.port_state_cache);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ ... @@ struct ib_cache {
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_table   **gid_cache;
        u8                     *lmc_cache;
+       enum ib_port_state     *port_state_cache;
 };
 
 struct ib_dma_mapping_ops {
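
The new field is written under write_lock_irq(&device->cache.lock) in the first hunk, so consumers need a matching read side. A minimal sketch of an accessor, patterned after the existing ib_get_cached_lmc() in drivers/infiniband/core/cache.c; the function name and the port-range check are assumptions for illustration, not something these hunks add:

#include <rdma/ib_verbs.h>

/* Sketch: read the cached port state under the same rwlock that
 * ib_cache_update() takes for writing. The name is illustrative. */
int example_get_cached_port_state(struct ib_device *device, u8 port_num,
                                  enum ib_port_state *port_state)
{
        unsigned long flags;

        if (port_num < rdma_start_port(device) ||
            port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *port_state = device->cache.port_state_cache[port_num -
                                                     rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return 0;
}

Reading through the cache avoids an ib_query_port() round trip on hot paths, which is presumably why the port state is cached alongside the LMC in the first place.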