#include "smb2proto.h"
 #include "cached_dir.h"
 
-struct cached_fid *init_cached_dir(const char *path);
+static struct cached_fid *init_cached_dir(const char *path);
+static void free_cached_dir(struct cached_fid *cfid);
+
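+/*
+ * Look up a cached directory handle for @path under cfid_list_lock.
+ * An existing entry is only returned once it has a lease; otherwise
+ * NULL is returned.  If no entry exists, a new one is created and added
+ * to the list, unless @lookup_only is set or MAX_CACHED_FIDS entries
+ * already exist.  The returned entry carries a reference for the caller.
+ */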
+static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+                                                   const char *path,
+                                                   bool lookup_only)
+{
+       struct cached_fid *cfid;
+
+       spin_lock(&cfids->cfid_list_lock);
+       list_for_each_entry(cfid, &cfids->entries, entry) {
+               if (!strcmp(cfid->path, path)) {
+                       /*
+                        * If it doesn't have a lease it is either not yet
+                        * fully cached or it may be in the process of
+                        * being deleted due to a lease break.
+                        */
+                       if (!cfid->has_lease) {
+                               spin_unlock(&cfids->cfid_list_lock);
+                               return NULL;
+                       }
+                       kref_get(&cfid->refcount);
+                       spin_unlock(&cfids->cfid_list_lock);
+                       return cfid;
+               }
+       }
+       if (lookup_only) {
+               spin_unlock(&cfids->cfid_list_lock);
+               return NULL;
+       }
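+       /* only cache a limited number of directory handles per tcon */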
+       if (cfids->num_entries >= MAX_CACHED_FIDS) {
+               spin_unlock(&cfids->cfid_list_lock);
+               return NULL;
+       }
+       cfid = init_cached_dir(path);
+       if (cfid == NULL) {
+               spin_unlock(&cfids->cfid_list_lock);
+               return NULL;
+       }
+       cfid->cfids = cfids;
+       cfids->num_entries++;
+       list_add(&cfid->entry, &cfids->entries);
+       cfid->on_list = true;
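+       /* take an extra reference for the caller */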
+       kref_get(&cfid->refcount);
+       spin_unlock(&cfids->cfid_list_lock);
+       return cfid;
+}
 
 /*
  * Open and cache a directory handle.
        struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
        struct kvec qi_iov[1];
        int rc, flags = 0;
-       __le16 utf16_path = 0; /* Null - since an open of top of share */
+       __le16 *utf16_path = NULL;
        u8 oplock = SMB2_OPLOCK_LEVEL_II;
        struct cifs_fid *pfid;
-       struct dentry *dentry;
+       struct dentry *dentry = NULL;
        struct cached_fid *cfid;
+       struct cached_fids *cfids;
 
-       if (tcon == NULL || tcon->nohandlecache ||
+       if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
            is_smb1_server(tcon->ses->server))
                return -EOPNOTSUPP;
 
        ses = tcon->ses;
        server = ses->server;
+       cfids = tcon->cfids;
+
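+       /* the open below requests a directory lease, so we need a lease key */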
+       if (!server->ops->new_lease_key)
+               return -EIO;
 
        if (cifs_sb->root == NULL)
                return -ENOENT;
 
+       /*
+        * TODO: for better caching we need to find and use the dentry also
+        * for non-root directories.
+        */
        if (!path[0])
                dentry = cifs_sb->root;
-       else
-               return -ENOENT;
 
-       cfid = tcon->cfids->cfid;
-       if (cfid == NULL) {
-               cfid = init_cached_dir(path);
-               tcon->cfids->cfid = cfid;
-       }
-       if (cfid == NULL)
+       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+       if (!utf16_path)
                return -ENOMEM;
 
-       mutex_lock(&cfid->fid_mutex);
-       if (cfid->is_valid) {
-               cifs_dbg(FYI, "found a cached root file handle\n");
+       cfid = find_or_create_cached_dir(cfids, path, lookup_only);
+       if (cfid == NULL) {
+               kfree(utf16_path);
+               return -ENOENT;
+       }
+       /*
+        * At this point we either already have a lease, in which case we
+        * can just return it, or we are guaranteed to be the only thread
+        * accessing this cfid.
+        */
+       if (cfid->has_lease) {
                *ret_cfid = cfid;
-               kref_get(&cfid->refcount);
-               mutex_unlock(&cfid->fid_mutex);
+               kfree(utf16_path);
                return 0;
        }
 
        /*
-        * We do not hold the lock for the open because in case
-        * SMB2_open needs to reconnect, it will end up calling
-        * cifs_mark_open_files_invalid() which takes the lock again
-        * thus causing a deadlock
+        * We do not hold the lock for the open in case SMB2_open needs
+        * to reconnect.
+        * This is safe because no other thread will be able to get a ref
+        * to the cfid until we have finished opening the file and (possibly)
+        * acquired a lease.
         */
-       mutex_unlock(&cfid->fid_mutex);
-
-       if (lookup_only)
-               return -ENOENT;
-
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       if (!server->ops->new_lease_key)
-               return -EIO;
-
        pfid = &cfid->fid;
        server->ops->new_lease_key(pfid);
 
        oparms.reconnect = false;
 
        rc = SMB2_open_init(tcon, server,
-                           &rqst[0], &oplock, &oparms, &utf16_path);
+                           &rqst[0], &oplock, &oparms, utf16_path);
        if (rc)
                goto oshr_free;
        smb2_set_next_command(tcon, &rqst[0]);
        rc = compound_send_recv(xid, ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
-       mutex_lock(&cfid->fid_mutex);
-
-       /*
-        * Now we need to check again as the cached root might have
-        * been successfully re-opened from a concurrent process
-        */
-
-       if (cfid->is_valid) {
-               /* work was already done */
-
-               /* stash fids for close() later */
-               struct cifs_fid fid = {
-                       .persistent_fid = pfid->persistent_fid,
-                       .volatile_fid = pfid->volatile_fid,
-               };
-
-               /*
-                * caller expects this func to set the fid in cfid to valid
-                * cached root, so increment the refcount.
-                */
-               kref_get(&cfid->refcount);
-
-               mutex_unlock(&cfid->fid_mutex);
-
-               if (rc == 0) {
-                       /* close extra handle outside of crit sec */
-                       SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
-               }
-               rc = 0;
-               goto oshr_free;
-       }
-
-       /* Cached root is still invalid, continue normaly */
-
        if (rc) {
                if (rc == -EREMCHG) {
                        tcon->need_reconnect = true;
                        pr_warn_once("server share %s deleted\n",
                                     tcon->tree_name);
                }
-               goto oshr_exit;
+               goto oshr_free;
        }
 
        atomic_inc(&tcon->num_remote_opens);
 #endif /* CIFS_DEBUG2 */
 
        cfid->tcon = tcon;
-       cfid->is_valid = true;
-       cfid->dentry = dentry;
-       if (dentry)
+       if (dentry) {
+               cfid->dentry = dentry;
                dget(dentry);
-       kref_init(&cfid->refcount);
-
+       }
        /* BB TBD check to see if oplock level check can be removed below */
-       if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
-               /*
-                * See commit 2f94a3125b87. Increment the refcount when we
-                * get a lease for root, release it if lease break occurs
-                */
-               kref_get(&cfid->refcount);
-               cfid->has_lease = true;
-               smb2_parse_contexts(server, o_rsp,
-                               &oparms.fid->epoch,
-                                   oparms.fid->lease_key, &oplock,
-                                   NULL, NULL);
-       } else
-               goto oshr_exit;
+       if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+               goto oshr_free;
+
+       smb2_parse_contexts(server, o_rsp,
+                           &oparms.fid->epoch,
+                           oparms.fid->lease_key, &oplock,
+                           NULL, NULL);
 
        qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
        if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
-               goto oshr_exit;
+               goto oshr_free;
        if (!smb2_validate_and_copy_iov(
                                le16_to_cpu(qi_rsp->OutputBufferOffset),
                                sizeof(struct smb2_file_all_info),
                                &rsp_iov[1], sizeof(struct smb2_file_all_info),
                                (char *)&cfid->file_all_info))
                cfid->file_all_info_is_valid = true;
-
        cfid->time = jiffies;
+       cfid->is_open = true;
+       cfid->has_lease = true;
 
-oshr_exit:
-       mutex_unlock(&cfid->fid_mutex);
 oshr_free:
+       kfree(utf16_path);
        SMB2_open_free(&rqst[0]);
        SMB2_query_info_free(&rqst[1]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
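+       /*
+        * If we did not get a lease, take the entry off the list so that
+        * no other thread can find it, and free it below.
+        */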
+       spin_lock(&cfids->cfid_list_lock);
+       if (!cfid->has_lease) {
+               if (cfid->on_list) {
+                       list_del(&cfid->entry);
+                       cfid->on_list = false;
+                       cfids->num_entries--;
+               }
+               rc = -ENOENT;
+       }
+       spin_unlock(&cfids->cfid_list_lock);
+       if (rc) {
+               free_cached_dir(cfid);
+               cfid = NULL;
+       }
+
        if (rc == 0)
                *ret_cfid = cfid;
 
                              struct cached_fid **ret_cfid)
 {
        struct cached_fid *cfid;
+       struct cached_fids *cfids = tcon->cfids;
 
-       cfid = tcon->cfids->cfid;
-       if (cfid == NULL)
+       if (cfids == NULL)
                return -ENOENT;
 
-       mutex_lock(&cfid->fid_mutex);
-       if (cfid->dentry == dentry) {
-               cifs_dbg(FYI, "found a cached root file handle by dentry\n");
-               *ret_cfid = cfid;
-               kref_get(&cfid->refcount);
-               mutex_unlock(&cfid->fid_mutex);
-               return 0;
+       spin_lock(&cfids->cfid_list_lock);
+       list_for_each_entry(cfid, &cfids->entries, entry) {
+               if (dentry && cfid->dentry == dentry) {
+                       cifs_dbg(FYI, "found a cached root file handle by dentry\n");
+                       kref_get(&cfid->refcount);
+                       *ret_cfid = cfid;
+                       spin_unlock(&cfids->cfid_list_lock);
+                       return 0;
+               }
        }
-       mutex_unlock(&cfid->fid_mutex);
+       spin_unlock(&cfids->cfid_list_lock);
        return -ENOENT;
 }
 
 {
        struct cached_fid *cfid = container_of(ref, struct cached_fid,
                                               refcount);
-       struct cached_dirent *dirent, *q;
 
-       if (cfid->is_valid) {
-               cifs_dbg(FYI, "clear cached root file handle\n");
-               SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
-                          cfid->fid.volatile_fid);
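+       /*
+        * Last reference is gone: take the entry off the list, close the
+        * handle on the server if it is still open and free the structure.
+        */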
+       spin_lock(&cfid->cfids->cfid_list_lock);
+       if (cfid->on_list) {
+               list_del(&cfid->entry);
+               cfid->on_list = false;
+               cfid->cfids->num_entries--;
        }
+       spin_unlock(&cfid->cfids->cfid_list_lock);
 
-       /*
-        * We only check validity above to send SMB2_close,
-        * but we still need to invalidate these entries
-        * when this function is called
-        */
-       cfid->is_valid = false;
-       cfid->file_all_info_is_valid = false;
-       cfid->has_lease = false;
-       if (cfid->dentry) {
-               dput(cfid->dentry);
-               cfid->dentry = NULL;
-       }
-       /*
-        * Delete all cached dirent names
-        */
-       mutex_lock(&cfid->dirents.de_mutex);
-       list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
-               list_del(&dirent->entry);
-               kfree(dirent->name);
-               kfree(dirent);
+       dput(cfid->dentry);
+       cfid->dentry = NULL;
+
+       if (cfid->is_open) {
+               SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+                          cfid->fid.volatile_fid);
        }
-       cfid->dirents.is_valid = 0;
-       cfid->dirents.is_failed = 0;
-       cfid->dirents.ctx = NULL;
-       cfid->dirents.pos = 0;
-       mutex_unlock(&cfid->dirents.de_mutex);
 
+       free_cached_dir(cfid);
 }
 
 void close_cached_dir(struct cached_fid *cfid)
 {
-       mutex_lock(&cfid->fid_mutex);
        kref_put(&cfid->refcount, smb2_close_cached_fid);
-       mutex_unlock(&cfid->fid_mutex);
-}
-
-void close_cached_dir_lease_locked(struct cached_fid *cfid)
-{
-       if (cfid->has_lease) {
-               cfid->has_lease = false;
-               kref_put(&cfid->refcount, smb2_close_cached_fid);
-       }
-}
-
-void close_cached_dir_lease(struct cached_fid *cfid)
-{
-       mutex_lock(&cfid->fid_mutex);
-       close_cached_dir_lease_locked(cfid);
-       mutex_unlock(&cfid->fid_mutex);
 }
 
 /*
        struct cached_fid *cfid;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
+       struct cached_fids *cfids;
 
        for (node = rb_first(root); node; node = rb_next(node)) {
                tlink = rb_entry(node, struct tcon_link, tl_rbnode);
                tcon = tlink_tcon(tlink);
                if (IS_ERR(tcon))
                        continue;
-               cfid = tcon->cfids->cfid;
-               if (cfid == NULL)
+               cfids = tcon->cfids;
+               if (cfids == NULL)
                        continue;
-               mutex_lock(&cfid->fid_mutex);
-               if (cfid->dentry) {
+               list_for_each_entry(cfid, &cfids->entries, entry) {
                        dput(cfid->dentry);
                        cfid->dentry = NULL;
                }
-               mutex_unlock(&cfid->fid_mutex);
        }
 }
 
 /*
- * Invalidate and close all cached dirs when a TCON has been reset
+ * Invalidate all cached dirs when a TCON has been reset
  * due to a session loss.
  */
 void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 {
-       struct cached_fid *cfid = tcon->cfids->cfid;
-
-       if (cfid == NULL)
-               return;
-
-       mutex_lock(&cfid->fid_mutex);
-       cfid->is_valid = false;
-       /* cached handle is not valid, so SMB2_CLOSE won't be sent below */
-       close_cached_dir_lease_locked(cfid);
-       memset(&cfid->fid, 0, sizeof(struct cifs_fid));
-       mutex_unlock(&cfid->fid_mutex);
+       struct cached_fids *cfids = tcon->cfids;
+       struct cached_fid *cfid, *q;
+       struct list_head entry;
+
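+       /*
+        * Move all entries to a private list under the spinlock, then drop
+        * the references outside the lock since the final kref_put() may
+        * end up in SMB2_close() which can sleep.
+        */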
+       INIT_LIST_HEAD(&entry);
+       spin_lock(&cfids->cfid_list_lock);
+       list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+               list_del(&cfid->entry);
+               list_add(&cfid->entry, &entry);
+               cfids->num_entries--;
+               cfid->is_open = false;
+               /* To prevent race with smb2_cached_lease_break() */
+               kref_get(&cfid->refcount);
+       }
+       spin_unlock(&cfids->cfid_list_lock);
+
+       list_for_each_entry_safe(cfid, q, &entry, entry) {
+               cfid->on_list = false;
+               list_del(&cfid->entry);
+               cancel_work_sync(&cfid->lease_break);
+               if (cfid->has_lease) {
+                       /*
+                        * Our lease was never cancelled from the server so we
+                        * need to drop the reference.
+                        */
+                       spin_lock(&cfids->cfid_list_lock);
+                       cfid->has_lease = false;
+                       spin_unlock(&cfids->cfid_list_lock);
+                       kref_put(&cfid->refcount, smb2_close_cached_fid);
+               }
+               /* Drop the extra reference taken above */
+               kref_put(&cfid->refcount, smb2_close_cached_fid);
+       }
 }
 
 static void
        struct cached_fid *cfid = container_of(work,
                                struct cached_fid, lease_break);
 
-       close_cached_dir_lease(cfid);
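+       /*
+        * The lease is gone; clear has_lease and drop the reference that
+        * was held for the lease.
+        */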
+       spin_lock(&cfid->cfids->cfid_list_lock);
+       cfid->has_lease = false;
+       spin_unlock(&cfid->cfids->cfid_list_lock);
+       kref_put(&cfid->refcount, smb2_close_cached_fid);
 }
 
 int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
 {
-       struct cached_fid *cfid = tcon->cfids->cfid;
+       struct cached_fids *cfids = tcon->cfids;
+       struct cached_fid *cfid;
 
-       if (cfid == NULL)
+       if (cfids == NULL)
                return false;
 
-       if (cfid->is_valid &&
-           !memcmp(lease_key,
-                   cfid->fid.lease_key,
-                   SMB2_LEASE_KEY_SIZE)) {
-               cfid->time = 0;
-               INIT_WORK(&cfid->lease_break,
-                         smb2_cached_lease_break);
-               queue_work(cifsiod_wq,
-                          &cfid->lease_break);
-               return true;
+       spin_lock(&cfids->cfid_list_lock);
+       list_for_each_entry(cfid, &cfids->entries, entry) {
+               if (cfid->has_lease &&
+                   !memcmp(lease_key,
+                           cfid->fid.lease_key,
+                           SMB2_LEASE_KEY_SIZE)) {
+                       cfid->time = 0;
+                       /*
+                        * We found a lease; remove it from the list
+                        * so that no other threads can access it.
+                        */
+                       list_del(&cfid->entry);
+                       cfid->on_list = false;
+                       cfids->num_entries--;
+
+                       queue_work(cifsiod_wq,
+                                  &cfid->lease_break);
+                       spin_unlock(&cfids->cfid_list_lock);
+                       return true;
+               }
        }
+       spin_unlock(&cfids->cfid_list_lock);
        return false;
 }
 
-struct cached_fid *init_cached_dir(const char *path)
+static struct cached_fid *init_cached_dir(const char *path)
 {
        struct cached_fid *cfid;
 
-       cfid = kzalloc(sizeof(*cfid), GFP_KERNEL);
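+       /* GFP_ATOMIC since this is called with cfids->cfid_list_lock held */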
+       cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
        if (!cfid)
                return NULL;
-       cfid->path = kstrdup(path, GFP_KERNEL);
+       cfid->path = kstrdup(path, GFP_ATOMIC);
        if (!cfid->path) {
                kfree(cfid);
                return NULL;
        }
 
+       INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
+       INIT_LIST_HEAD(&cfid->entry);
        INIT_LIST_HEAD(&cfid->dirents.entries);
        mutex_init(&cfid->dirents.de_mutex);
-       mutex_init(&cfid->fid_mutex);
+       spin_lock_init(&cfid->fid_lock);
+       kref_init(&cfid->refcount);
        return cfid;
 }
 
-void free_cached_dir(struct cached_fid *cfid)
+static void free_cached_dir(struct cached_fid *cfid)
 {
+       struct cached_dirent *dirent, *q;
+
+       dput(cfid->dentry);
+       cfid->dentry = NULL;
+
+       /*
+        * Delete all cached dirent names
+        */
+       list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
+               list_del(&dirent->entry);
+               kfree(dirent->name);
+               kfree(dirent);
+       }
+
        kfree(cfid->path);
        cfid->path = NULL;
        kfree(cfid);
        cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
        if (!cfids)
                return NULL;
-       mutex_init(&cfids->cfid_list_mutex);
+       spin_lock_init(&cfids->cfid_list_lock);
+       INIT_LIST_HEAD(&cfids->entries);
        return cfids;
 }
 
+/*
+ * Called from tconInfoFree when we are tearing down the tcon.
+ * There are no active users or open files/directories at this point.
+ */
 void free_cached_dirs(struct cached_fids *cfids)
 {
-       if (cfids->cfid) {
-               free_cached_dir(cfids->cfid);
-               cfids->cfid = NULL;
+       struct cached_fid *cfid, *q;
+       struct list_head entry;
+
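+       /*
+        * As in invalidate_all_cached_dirs(), move the entries to a private
+        * list under the lock and free them outside it.
+        */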
+       INIT_LIST_HEAD(&entry);
+       spin_lock(&cfids->cfid_list_lock);
+       list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+               cfid->on_list = false;
+               cfid->is_open = false;
+               list_del(&cfid->entry);
+               list_add(&cfid->entry, &entry);
        }
+       spin_unlock(&cfids->cfid_list_lock);
+
+       list_for_each_entry_safe(cfid, q, &entry, entry) {
+               list_del(&cfid->entry);
+               free_cached_dir(cfid);
+       }
+
        kfree(cfids);
 }