* locations; additions must take care to only insert into the new bucket.
  */
 
-typedef struct cfs_hash {
+struct cfs_hash {
        /** serialize with rehash, or serialize all operations if
         * the hash-table has CFS_HASH_NO_BKTLOCK */
        union cfs_hash_lock          hs_lock;
 #endif
        /** name of htable */
        char                    hs_name[0];
-} cfs_hash_t;
+};
 
 typedef struct cfs_hash_lock_ops {
        /** lock the hash table */
 
 typedef struct cfs_hash_hlist_ops {
        /** return hlist_head of hash-head of @bd */
-       struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, struct cfs_hash_bd *bd);
+       struct hlist_head *(*hop_hhead)(struct cfs_hash *hs, struct cfs_hash_bd *bd);
        /** return hash-head size */
-       int (*hop_hhead_size)(cfs_hash_t *hs);
+       int (*hop_hhead_size)(struct cfs_hash *hs);
        /** add @hnode to hash-head of @bd */
-       int (*hop_hnode_add)(cfs_hash_t *hs,
+       int (*hop_hnode_add)(struct cfs_hash *hs,
                             struct cfs_hash_bd *bd, struct hlist_node *hnode);
        /** remove @hnode from hash-head of @bd */
-       int (*hop_hnode_del)(cfs_hash_t *hs,
+       int (*hop_hnode_del)(struct cfs_hash *hs,
                             struct cfs_hash_bd *bd, struct hlist_node *hnode);
 } cfs_hash_hlist_ops_t;
 
 typedef struct cfs_hash_ops {
        /** return hashed value from @key */
-       unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
+       unsigned (*hs_hash)(struct cfs_hash *hs, const void *key, unsigned mask);
        /** return key address of @hnode */
        void *   (*hs_key)(struct hlist_node *hnode);
        /** copy key from @hnode to @key */
        /** return object address of @hnode, i.e. container_of(...hnode) */
        void *   (*hs_object)(struct hlist_node *hnode);
        /** get refcount of item, always called with holding bucket-lock */
-       void     (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** release refcount of item */
-       void     (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** release refcount of item, always called with holding bucket-lock */
-       void     (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_put_locked)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** it's called before removing of @hnode */
-       void     (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
+       void     (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
 } cfs_hash_ops_t;
 
 /** total number of buckets in @hs */
 #define CFS_HASH_RH_NHLIST(hs)  (1U << (hs)->hs_rehash_bits)
 
 static inline int
-cfs_hash_with_no_lock(cfs_hash_t *hs)
+cfs_hash_with_no_lock(struct cfs_hash *hs)
 {
        /* caller will serialize all operations for this hash-table */
        return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_no_bktlock(cfs_hash_t *hs)
+cfs_hash_with_no_bktlock(struct cfs_hash *hs)
 {
        /* no bucket lock, one single lock to protect the hash-table */
        return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_rw_bktlock(cfs_hash_t *hs)
+cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
 {
        /* rwlock to protect hash bucket */
        return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_spin_bktlock(cfs_hash_t *hs)
+cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
 {
        /* spinlock to protect hash bucket */
        return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
 }
 
 static inline int
-cfs_hash_with_add_tail(cfs_hash_t *hs)
+cfs_hash_with_add_tail(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
 }
 
 static inline int
-cfs_hash_with_no_itemref(cfs_hash_t *hs)
+cfs_hash_with_no_itemref(struct cfs_hash *hs)
 {
        /* hash-table doesn't keep refcount on item,
         * item can't be removed from hash unless it's
 }
 
 static inline int
-cfs_hash_with_bigname(cfs_hash_t *hs)
+cfs_hash_with_bigname(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
 }
 
 static inline int
-cfs_hash_with_counter(cfs_hash_t *hs)
+cfs_hash_with_counter(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
 }
 
 static inline int
-cfs_hash_with_rehash(cfs_hash_t *hs)
+cfs_hash_with_rehash(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_REHASH) != 0;
 }
 
 static inline int
-cfs_hash_with_rehash_key(cfs_hash_t *hs)
+cfs_hash_with_rehash_key(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
 }
 
 static inline int
-cfs_hash_with_shrink(cfs_hash_t *hs)
+cfs_hash_with_shrink(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
 }
 
 static inline int
-cfs_hash_with_assert_empty(cfs_hash_t *hs)
+cfs_hash_with_assert_empty(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
 }
 
 static inline int
-cfs_hash_with_depth(cfs_hash_t *hs)
+cfs_hash_with_depth(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
 }
 
 static inline int
-cfs_hash_with_nblk_change(cfs_hash_t *hs)
+cfs_hash_with_nblk_change(struct cfs_hash *hs)
 {
        return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
 }
 
 static inline int
-cfs_hash_is_exiting(cfs_hash_t *hs)
+cfs_hash_is_exiting(struct cfs_hash *hs)
 {       /* cfs_hash_destroy is called */
        return hs->hs_exiting;
 }
 
 static inline int
-cfs_hash_is_rehashing(cfs_hash_t *hs)
+cfs_hash_is_rehashing(struct cfs_hash *hs)
 {       /* rehash is launched */
        return hs->hs_rehash_bits != 0;
 }
 
 static inline int
-cfs_hash_is_iterating(cfs_hash_t *hs)
+cfs_hash_is_iterating(struct cfs_hash *hs)
 {       /* someone is calling cfs_hash_for_each_* */
        return hs->hs_iterating || hs->hs_iterators != 0;
 }
 
 static inline int
-cfs_hash_bkt_size(cfs_hash_t *hs)
+cfs_hash_bkt_size(struct cfs_hash *hs)
 {
        return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
               hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
 #define CFS_HOP(hs, op)           (hs)->hs_ops->hs_ ## op
 
 static inline unsigned
-cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return CFS_HOP(hs, hash)(hs, key, mask);
 }
 
 static inline void *
-cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        return CFS_HOP(hs, key)(hnode);
 }
 
 static inline void
-cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
+cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
 {
        if (CFS_HOP(hs, keycpy) != NULL)
                CFS_HOP(hs, keycpy)(hnode, key);
  * Returns 1 on a match,
  */
 static inline int
-cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
        return CFS_HOP(hs, keycmp)(key, hnode);
 }
 
 static inline void *
-cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        return CFS_HOP(hs, object)(hnode);
 }
 
 static inline void
-cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        return CFS_HOP(hs, get)(hs, hnode);
 }
 
 static inline void
-cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        LASSERT(CFS_HOP(hs, put_locked) != NULL);
 
 }
 
 static inline void
-cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        LASSERT(CFS_HOP(hs, put) != NULL);
 
 }
 
 static inline void
-cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        if (CFS_HOP(hs, exit))
                CFS_HOP(hs, exit)(hs, hnode);
 }
 
-static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
+static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
 {
        hs->hs_lops->hs_lock(&hs->hs_lock, excl);
 }
 
-static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
+static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
 {
        hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
 }
 
-static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
+static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
                                        atomic_t *condition)
 {
        LASSERT(cfs_hash_with_no_bktlock(hs));
        return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
 }
 
-static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
+static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
                                    struct cfs_hash_bd *bd, int excl)
 {
        hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
 }
 
-static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
+static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
                                      struct cfs_hash_bd *bd, int excl)
 {
        hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
  * operations on cfs_hash bucket (bd: bucket descriptor),
  * they are normally for hash-table without rehash
  */
-void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bd);
+void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd);
 
-static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
+static inline void cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
                                            struct cfs_hash_bd *bd, int excl)
 {
        cfs_hash_bd_get(hs, key, bd);
        cfs_hash_bd_lock(hs, bd, excl);
 }
 
-static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, struct cfs_hash_bd *bd)
+static inline unsigned cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
 }
 
-static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
+static inline void cfs_hash_bd_index_set(struct cfs_hash *hs,
                                         unsigned index, struct cfs_hash_bd *bd)
 {
        bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
 }
 
 static inline void *
-cfs_hash_bd_extra_get(cfs_hash_t *hs, struct cfs_hash_bd *bd)
+cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        return (void *)bd->bd_bucket +
               cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
        return 0;
 }
 
-void cfs_hash_bd_add_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode);
-void cfs_hash_bd_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode);
-void cfs_hash_bd_move_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd_old,
+void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                             struct cfs_hash_bd *bd_new, struct hlist_node *hnode);
 
-static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static inline int cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                           atomic_t *condition)
 {
        LASSERT(cfs_hash_with_spin_bktlock(hs));
                                       &bd->bd_bucket->hsb_lock.spin);
 }
 
-static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
+static inline struct hlist_head *cfs_hash_bd_hhead(struct cfs_hash *hs,
                                                  struct cfs_hash_bd *bd)
 {
        return hs->hs_hops->hop_hhead(hs, bd);
 }
 
-struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_bd_lookup_locked(struct cfs_hash *hs,
                                            struct cfs_hash_bd *bd, const void *key);
-struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_bd_peek_locked(struct cfs_hash *hs,
                                          struct cfs_hash_bd *bd, const void *key);
-struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_bd_findadd_locked(struct cfs_hash *hs,
                                             struct cfs_hash_bd *bd, const void *key,
                                             struct hlist_node *hnode,
                                             int insist_add);
-struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_bd_finddel_locked(struct cfs_hash *hs,
                                             struct cfs_hash_bd *bd, const void *key,
                                             struct hlist_node *hnode);
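
/*
 * Illustrative sketch: a typical locked lookup with the single-bucket
 * descriptor API above, for a hash table without rehash. The helper
 * name "example_bd_lookup" is hypothetical and only demonstrates the
 * calling sequence.
 */
static inline struct hlist_node *
example_bd_lookup(struct cfs_hash *hs, const void *key)
{
	struct cfs_hash_bd bd;
	struct hlist_node *hnode;

	cfs_hash_bd_get_and_lock(hs, key, &bd, 0);	/* 0 == shared lock */
	hnode = cfs_hash_bd_lookup_locked(hs, &bd, key);
	cfs_hash_bd_unlock(hs, &bd, 0);
	return hnode;
}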
 
  * operations on cfs_hash bucket (bd: bucket descriptor),
  * they are safe for hash-table with rehash
  */
-void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bds);
-void cfs_hash_dual_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl);
-void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl);
+void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds);
+void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
+void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl);
 
-static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
+static inline void cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
                                                 struct cfs_hash_bd *bds, int excl)
 {
        cfs_hash_dual_bd_get(hs, key, bds);
        cfs_hash_dual_bd_lock(hs, bds, excl);
 }
 
-struct hlist_node *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs,
                                                 struct cfs_hash_bd *bds,
                                                 const void *key);
-struct hlist_node *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs,
                                                  struct cfs_hash_bd *bds,
                                                  const void *key,
                                                  struct hlist_node *hnode,
                                                  int insist_add);
-struct hlist_node *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
+struct hlist_node *cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs,
                                                  struct cfs_hash_bd *bds,
                                                  const void *key,
                                                  struct hlist_node *hnode);
 
 /* Hash init/cleanup functions */
-cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
+struct cfs_hash *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                            unsigned bkt_bits, unsigned extra_bytes,
                            unsigned min_theta, unsigned max_theta,
                            cfs_hash_ops_t *ops, unsigned flags);
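
/*
 * Illustrative sketch: a typical call, assuming the usual
 * CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA and CFS_HASH_DEFAULT macros
 * from this header; "example_ops" is a hypothetical cfs_hash_ops_t
 * filled in by the caller.
 *
 *	hs = cfs_hash_create("example", 8, 16, 4, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &example_ops, CFS_HASH_DEFAULT);
 */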
 
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
-void cfs_hash_putref(cfs_hash_t *hs);
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
+void cfs_hash_putref(struct cfs_hash *hs);
 
 /* Hash addition functions */
-void cfs_hash_add(cfs_hash_t *hs, const void *key,
+void cfs_hash_add(struct cfs_hash *hs, const void *key,
                  struct hlist_node *hnode);
-int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
+int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode);
-void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                              struct hlist_node *hnode);
 
 /* Hash deletion functions */
-void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
-void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
+void *cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode);
+void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);
 
 /* Hash lookup/for_each functions */
 #define CFS_HASH_LOOP_HOG       1024
 
-typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                      struct hlist_node *node, void *data);
-void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
-void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
-int  cfs_hash_for_each_nolock(cfs_hash_t *hs,
+void *cfs_hash_lookup(struct cfs_hash *hs, const void *key);
+void cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+int  cfs_hash_for_each_nolock(struct cfs_hash *hs,
                              cfs_hash_for_each_cb_t, void *data);
-int  cfs_hash_for_each_empty(cfs_hash_t *hs,
+int  cfs_hash_for_each_empty(struct cfs_hash *hs,
                             cfs_hash_for_each_cb_t, void *data);
-void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+void cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                           cfs_hash_for_each_cb_t, void *data);
 typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
-void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
+void cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
 
-void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+void cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                             cfs_hash_for_each_cb_t, void *data);
-int  cfs_hash_is_empty(cfs_hash_t *hs);
-__u64 cfs_hash_size_get(cfs_hash_t *hs);
+int  cfs_hash_is_empty(struct cfs_hash *hs);
+__u64 cfs_hash_size_get(struct cfs_hash *hs);
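
/*
 * Illustrative sketch: a minimal cfs_hash_for_each_cb_t callback.
 * "example_count_cb" is hypothetical; returning 0 lets the iteration
 * continue (a non-zero return is used to stop it, as cfs_hash_peek()
 * does in cfs_hash.c).
 */
static int example_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode, void *data)
{
	(*(int *)data)++;
	return 0;
}
/* usage: int count = 0; cfs_hash_for_each(hs, example_count_cb, &count); */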
 
 /*
  * Rehash - Theta is calculated to be the average chained
  * hash depth assuming a perfectly uniform hash function.
  */
-void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
-void cfs_hash_rehash_cancel(cfs_hash_t *hs);
-int  cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
+void cfs_hash_rehash_cancel(struct cfs_hash *hs);
+int  cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode);
 
 #if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
 /* Validate hnode references the correct key */
 static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
                      struct hlist_node *hnode)
 {
        LASSERT(cfs_hash_keycmp(hs, key, hnode));
 
 /* Validate hnode is in the correct bucket */
 static inline void
-cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode)
 {
        struct cfs_hash_bd   bds[2];
 #else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
 
 static inline void
-cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
                      struct hlist_node *hnode) {}
 
 static inline void
-cfs_hash_bucket_validate(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode) {}
 
 #endif /* CFS_HASH_DEBUG_LEVEL */
               (__cfs_hash_theta_int(theta) * 1000);
 }
 
-static inline int __cfs_hash_theta(cfs_hash_t *hs)
+static inline int __cfs_hash_theta(struct cfs_hash *hs)
 {
        return (atomic_read(&hs->hs_count) <<
                CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
 }
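
/*
 * Illustrative note: assuming CFS_HASH_THETA_BITS is 10, a table holding
 * 4096 items over 2^10 hash heads (hs_cur_bits == 10) gives
 * __cfs_hash_theta() == (4096 << 10) >> 10 == 4096, whose integer part
 * (4096 >> 10 == 4) corresponds to an average chain depth of 4.
 */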
 
-static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
+static inline void __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
 {
        LASSERT(min < max);
        hs->hs_min_theta = (__u16)min;
 /* Generic debug formatting routines mainly for proc handler */
 struct seq_file;
 int cfs_hash_debug_header(struct seq_file *m);
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m);
+int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
 
 /*
  * Generic djb2 hash algorithm for character arrays.
 
        /**
         * objects hash table
         */
-       cfs_hash_t             *ls_obj_hash;
+       struct cfs_hash        *ls_obj_hash;
        /**
         * index of bucket on hash table while purging
         */
 
        ldlm_side_t             ns_client;
 
        /** Resource hash table for namespace. */
-       cfs_hash_t              *ns_rs_hash;
+       struct cfs_hash         *ns_rs_hash;
 
        /** serialize */
        spinlock_t              ns_lock;
 
        /** Connection count value from last successful reconnect rpc */
        __u32                exp_conn_cnt;
        /** Hash list of all ldlm locks granted on this export */
-       cfs_hash_t             *exp_lock_hash;
+       struct cfs_hash        *exp_lock_hash;
        /**
         * Hash list for Posix lock deadlock detection, added with
         * ldlm_lock::l_exp_flock_hash.
         */
-       cfs_hash_t             *exp_flock_hash;
+       struct cfs_hash        *exp_flock_hash;
        struct list_head                exp_outstanding_replies;
        struct list_head                exp_uncommitted_replies;
        spinlock_t                exp_uncommitted_replies_lock;
 
 struct nrs_crrn_net {
        struct ptlrpc_nrs_resource      cn_res;
        cfs_binheap_t                  *cn_binheap;
-       cfs_hash_t                     *cn_cli_hash;
+       struct cfs_hash                *cn_cli_hash;
        /**
         * Used when a new scheduling round commences, in order to synchronize
         * all clients with the new round number.
 struct nrs_orr_data {
        struct ptlrpc_nrs_resource      od_res;
        cfs_binheap_t                  *od_binheap;
-       cfs_hash_t                     *od_obj_hash;
+       struct cfs_hash                *od_obj_hash;
        struct kmem_cache                      *od_cache;
        /**
         * Used when a new scheduling round commences, in order to synchronize
 
        /* ptlrpc work for writeback in ptlrpcd context */
        void                *cl_writeback_work;
        /* hash tables for osc_quota_info */
-       cfs_hash_t            *cl_quota_hash[MAXQUOTAS];
+       struct cfs_hash       *cl_quota_hash[MAXQUOTAS];
 };
 #define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
 
        __u32              lov_tgt_size;   /* size of tgts array */
        int                  lov_connects;
        int                  lov_pool_count;
-       cfs_hash_t           *lov_pools_hash_body; /* used for key access */
+       struct cfs_hash      *lov_pools_hash_body; /* used for key access */
        struct list_head              lov_pool_list; /* used for sequential access */
        struct proc_dir_entry   *lov_pool_proc_entry;
        enum lustre_sec_part    lov_sp_me;
         * protection of other bits using _bh lock */
        unsigned long obd_recovery_expired:1;
        /* uuid-export hash body */
-       cfs_hash_t           *obd_uuid_hash;
+       struct cfs_hash      *obd_uuid_hash;
        /* nid-export hash body */
-       cfs_hash_t           *obd_nid_hash;
+       struct cfs_hash      *obd_nid_hash;
        /* nid stats body */
-       cfs_hash_t           *obd_nid_stats_hash;
+       struct cfs_hash      *obd_nid_stats_hash;
        struct list_head              obd_nid_stats;
        atomic_t            obd_refcount;
        wait_queue_head_t            obd_refcount_waitq;
 
  * Export handle<->flock hash operations.
  */
 static unsigned
-ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
 }
 }
 
 static void
-ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
 }
 
 static void
-ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
 
        return LDLM_ITER_CONTINUE;
 }
 
-static int ldlm_reprocess_res(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int ldlm_reprocess_res(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                              struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
  * Iterator function for ldlm_cancel_locks_for_export.
  * Cancels passed locks.
  */
-int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+int ldlm_cancel_locks_for_export_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                    struct hlist_node *hnode, void *data)
 
 {
 
  * Export handle<->lock hash operations.
  */
 static unsigned
-ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
 }
 }
 
 static void
-ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
 
 }
 
 static void
-ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
 
 
        void   *lc_opaque;
 };
 
-static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                       struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource       *res = cfs_hash_object(hs, hnode);
        return helper->iter(lock, helper->closure);
 }
 
-static int ldlm_res_iter_helper(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                struct hlist_node *hnode, void *arg)
 
 {
 
 
 #endif /* LPROCFS */
 
-static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
+static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
                                  const void *key, unsigned mask)
 {
        const struct ldlm_res_id     *id  = key;
        return val & mask;
 }
 
-static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
+static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
                                      const void *key, unsigned mask)
 {
        const struct ldlm_res_id *id = key;
        return hlist_entry(hnode, struct ldlm_resource, lr_hash);
 }
 
-static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_get_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_resource *res;
 
        ldlm_resource_getref(res);
 }
 
-static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_resource *res;
 
        ldlm_resource_putref_locked(res);
 }
 
-static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_resource *res;
 
        } while (1);
 }
 
-static int ldlm_resource_clean(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                               struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
        return 0;
 }
 
-static int ldlm_resource_complain(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                                  struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource  *res = cfs_hash_object(hs, hnode);
 }
 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
 
-static int ldlm_res_hash_dump(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                              struct hlist_node *hnode, void *arg)
 {
        struct ldlm_resource *res = cfs_hash_object(hs, hnode);
 
 };
 
 static void
-cfs_hash_lock_setup(cfs_hash_t *hs)
+cfs_hash_lock_setup(struct cfs_hash *hs)
 {
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;
 } cfs_hash_head_t;
 
 static int
-cfs_hash_hh_hhead_size(cfs_hash_t *hs)
+cfs_hash_hh_hhead_size(struct cfs_hash *hs)
 {
        return sizeof(cfs_hash_head_t);
 }
 
 static struct hlist_head *
-cfs_hash_hh_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd)
+cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
 
 }
 
 static int
-cfs_hash_hh_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
 }
 
 static int
-cfs_hash_hh_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        hlist_del_init(hnode);
 } cfs_hash_head_dep_t;
 
 static int
-cfs_hash_hd_hhead_size(cfs_hash_t *hs)
+cfs_hash_hd_hhead_size(struct cfs_hash *hs)
 {
        return sizeof(cfs_hash_head_dep_t);
 }
 
 static struct hlist_head *
-cfs_hash_hd_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd)
+cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        cfs_hash_head_dep_t   *head;
 
 }
 
 static int
-cfs_hash_hd_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
 }
 
 static int
-cfs_hash_hd_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
 } cfs_hash_dhead_t;
 
 static int
-cfs_hash_dh_hhead_size(cfs_hash_t *hs)
+cfs_hash_dh_hhead_size(struct cfs_hash *hs)
 {
        return sizeof(cfs_hash_dhead_t);
 }
 
 static struct hlist_head *
-cfs_hash_dh_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd)
+cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        cfs_hash_dhead_t *head;
 
 }
 
 static int
-cfs_hash_dh_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
 }
 
 static int
-cfs_hash_dh_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
 } cfs_hash_dhead_dep_t;
 
 static int
-cfs_hash_dd_hhead_size(cfs_hash_t *hs)
+cfs_hash_dd_hhead_size(struct cfs_hash *hs)
 {
        return sizeof(cfs_hash_dhead_dep_t);
 }
 
 static struct hlist_head *
-cfs_hash_dd_hhead(cfs_hash_t *hs, struct cfs_hash_bd *bd)
+cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
        cfs_hash_dhead_dep_t *head;
 
 }
 
 static int
-cfs_hash_dd_hnode_add(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
 }
 
 static int
-cfs_hash_dd_hnode_del(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
 };
 
 static void
-cfs_hash_hlist_setup(cfs_hash_t *hs)
+cfs_hash_hlist_setup(struct cfs_hash *hs)
 {
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
 }
 
 static void
-cfs_hash_bd_from_key(cfs_hash_t *hs, struct cfs_hash_bucket **bkts,
+cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
 {
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
 }
 
 void
-cfs_hash_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bd)
+cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
 {
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
 EXPORT_SYMBOL(cfs_hash_bd_get);
 
 static inline void
-cfs_hash_bd_dep_record(cfs_hash_t *hs, struct cfs_hash_bd *bd, int dep_cur)
+cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 {
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;
 }
 
 void
-cfs_hash_bd_add_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
 {
        int             rc;
 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
 
 void
-cfs_hash_bd_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
 {
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);
 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
 
 void
-cfs_hash_bd_move_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd_old,
+cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
 {
        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
 } cfs_hash_lookup_intent_t;
 
 static struct hlist_node *
-cfs_hash_bd_lookup_intent(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
                          cfs_hash_lookup_intent_t intent)
 
 }
 
 struct hlist_node *
-cfs_hash_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, const void *key)
+cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_FIND);
 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
 
 struct hlist_node *
-cfs_hash_bd_peek_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd, const void *key)
+cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_PEEK);
 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
 
 struct hlist_node *
-cfs_hash_bd_findadd_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           const void *key, struct hlist_node *hnode,
                           int noref)
 {
 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
 
 struct hlist_node *
-cfs_hash_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           const void *key, struct hlist_node *hnode)
 {
        /* hnode can be NULL, we find the first item with @key */
 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
 
 static void
-cfs_hash_multi_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                       unsigned n, int excl)
 {
        struct cfs_hash_bucket *prev = NULL;
 }
 
 static void
-cfs_hash_multi_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                         unsigned n, int excl)
 {
        struct cfs_hash_bucket *prev = NULL;
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
 {
        struct hlist_node  *ehnode;
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
+cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
                                 struct cfs_hash_bd *bds, unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
 {
 }
 
 static struct hlist_node *
-cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
 {
 }
 
 void
-cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, struct cfs_hash_bd *bds)
+cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
 {
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
 
 void
-cfs_hash_dual_bd_lock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl)
+cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 {
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
 }
 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
 
 void
-cfs_hash_dual_bd_unlock(cfs_hash_t *hs, struct cfs_hash_bd *bds, int excl)
+cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 {
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
 }
 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
 
 struct hlist_node *
-cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key)
 {
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
 
 struct hlist_node *
-cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
 {
 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
 
 struct hlist_node *
-cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, struct cfs_hash_bd *bds,
+cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
 {
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
  * successful, and NULL on error.
  */
 static struct cfs_hash_bucket **
-cfs_hash_buckets_realloc(cfs_hash_t *hs, struct cfs_hash_bucket **old_bkts,
+cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                         unsigned int old_size, unsigned int new_size)
 {
        struct cfs_hash_bucket **new_bkts;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 static int cfs_hash_dep_print(cfs_workitem_t *wi)
 {
-       cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+       struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
        int      dep;
        int      bkt;
        int      off;
        return 0;
 }
 
-static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
 }
 
-static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
+static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
 
-static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
-static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
+static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
+static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
 
 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
 
-cfs_hash_t *
+struct cfs_hash *
 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                cfs_hash_ops_t *ops, unsigned flags)
 {
-       cfs_hash_t *hs;
+       struct cfs_hash *hs;
        int      len;
 
        CLASSERT(CFS_HASH_THETA_BITS < 15);
 
        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
-       LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
+       LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
        if (hs == NULL)
                return NULL;
 
        if (hs->hs_buckets != NULL)
                return hs;
 
-       LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
+       LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
        return NULL;
 }
 EXPORT_SYMBOL(cfs_hash_create);
  * Cleanup libcfs hash @hs.
  */
 static void
-cfs_hash_destroy(cfs_hash_t *hs)
+cfs_hash_destroy(struct cfs_hash *hs)
 {
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
-       LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+       LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
 }
 
-cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
+struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
 {
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
 }
 EXPORT_SYMBOL(cfs_hash_getref);
 
-void cfs_hash_putref(cfs_hash_t *hs)
+void cfs_hash_putref(struct cfs_hash *hs)
 {
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
 EXPORT_SYMBOL(cfs_hash_putref);
 
 static inline int
-cfs_hash_rehash_bits(cfs_hash_t *hs)
+cfs_hash_rehash_bits(struct cfs_hash *hs)
 {
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
  * - too many elements
  */
 static inline int
-cfs_hash_rehash_inline(cfs_hash_t *hs)
+cfs_hash_rehash_inline(struct cfs_hash *hs)
 {
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
  * ops->hs_get function will be called when the item is added.
  */
 void
-cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
        struct cfs_hash_bd   bd;
        int          bits;
 EXPORT_SYMBOL(cfs_hash_add);
 
 static struct hlist_node *
-cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
+cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
 {
        struct hlist_node *ehnode;
  * Returns 0 on success or -EALREADY on key collisions.
  */
 int
-cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
  * Otherwise ops->hs_get is called on the item which was added.
  */
 void *
-cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
 {
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
  * on the removed object.
  */
 void *
-cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 {
        void       *obj  = NULL;
        int          bits = 0;
  * will be returned and ops->hs_put is called on the removed object.
  */
 void *
-cfs_hash_del_key(cfs_hash_t *hs, const void *key)
+cfs_hash_del_key(struct cfs_hash *hs, const void *key)
 {
        return cfs_hash_del(hs, key, NULL);
 }
  * in the hash @hs NULL is returned.
  */
 void *
-cfs_hash_lookup(cfs_hash_t *hs, const void *key)
+cfs_hash_lookup(struct cfs_hash *hs, const void *key)
 {
        void             *obj = NULL;
        struct hlist_node     *hnode;
 EXPORT_SYMBOL(cfs_hash_lookup);
 
 static void
-cfs_hash_for_each_enter(cfs_hash_t *hs)
+cfs_hash_for_each_enter(struct cfs_hash *hs)
 {
        LASSERT(!cfs_hash_is_exiting(hs));
 
 }
 
 static void
-cfs_hash_for_each_exit(cfs_hash_t *hs)
+cfs_hash_for_each_exit(struct cfs_hash *hs)
 {
        int remained;
        int bits;
  *      cfs_hash_bd_del_locked
  */
 static __u64
-cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
+cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
 {
        struct hlist_node     *hnode;
 } cfs_hash_cond_arg_t;
 
 static int
-cfs_hash_cond_del_locked(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
 {
        cfs_hash_cond_arg_t *cond = data;
  * any object being referenced.
  */
 void
-cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
 {
        cfs_hash_cond_arg_t arg = {
                .func   = func,
 EXPORT_SYMBOL(cfs_hash_cond_del);
 
 void
-cfs_hash_for_each(cfs_hash_t *hs,
+cfs_hash_for_each(struct cfs_hash *hs,
                  cfs_hash_for_each_cb_t func, void *data)
 {
        cfs_hash_for_each_tight(hs, func, data, 0);
 EXPORT_SYMBOL(cfs_hash_for_each);
 
 void
-cfs_hash_for_each_safe(cfs_hash_t *hs,
+cfs_hash_for_each_safe(struct cfs_hash *hs,
                       cfs_hash_for_each_cb_t func, void *data)
 {
        cfs_hash_for_each_tight(hs, func, data, 1);
 EXPORT_SYMBOL(cfs_hash_for_each_safe);
 
 static int
-cfs_hash_peek(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
              struct hlist_node *hnode, void *data)
 {
        *(int *)data = 0;
 }
 
 int
-cfs_hash_is_empty(cfs_hash_t *hs)
+cfs_hash_is_empty(struct cfs_hash *hs)
 {
        int empty = 1;
 
 EXPORT_SYMBOL(cfs_hash_is_empty);
 
 __u64
-cfs_hash_size_get(cfs_hash_t *hs)
+cfs_hash_size_get(struct cfs_hash *hs)
 {
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
  * two cases, so iteration has to be stopped on change.
  */
 static int
-cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
+cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_node *hnode;
        struct hlist_node *tmp;
 }
 
 int
-cfs_hash_for_each_nolock(cfs_hash_t *hs,
+cfs_hash_for_each_nolock(struct cfs_hash *hs,
                         cfs_hash_for_each_cb_t func, void *data)
 {
        if (cfs_hash_with_no_lock(hs) ||
  * the required locking is in place to prevent concurrent insertions.
  */
 int
-cfs_hash_for_each_empty(cfs_hash_t *hs,
+cfs_hash_for_each_empty(struct cfs_hash *hs,
                        cfs_hash_for_each_cb_t func, void *data)
 {
        unsigned  i = 0;
 EXPORT_SYMBOL(cfs_hash_for_each_empty);
 
 void
-cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_head   *hhead;
  * is held so the callback must never sleep.
    */
 void
-cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                      cfs_hash_for_each_cb_t func, void *data)
 {
        struct hlist_node   *hnode;
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
 void
-cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
+cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
 {
        int     i;
 
 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
 
 void
-cfs_hash_rehash_cancel(cfs_hash_t *hs)
+cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
        cfs_hash_lock(hs, 1);
        cfs_hash_rehash_cancel_locked(hs);
 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
 
 int
-cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
+cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
        int     rc;
 
 EXPORT_SYMBOL(cfs_hash_rehash);
 
 static int
-cfs_hash_rehash_bd(cfs_hash_t *hs, struct cfs_hash_bd *old)
+cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 {
        struct cfs_hash_bd      new;
        struct hlist_head  *hhead;
 static int
 cfs_hash_rehash_worker(cfs_workitem_t *wi)
 {
-       cfs_hash_t       *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
+       struct cfs_hash  *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
        struct cfs_hash_bucket **bkts;
        struct cfs_hash_bd       bd;
        unsigned int    old_size;
  * the registered cfs_hash_get() and cfs_hash_put() functions will
  * not be called.
  */
-void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode)
 {
        struct cfs_hash_bd      bds[3];
 EXPORT_SYMBOL(cfs_hash_debug_header);
 
 static struct cfs_hash_bucket **
-cfs_hash_full_bkts(cfs_hash_t *hs)
+cfs_hash_full_bkts(struct cfs_hash *hs)
 {
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
 }
 
 static unsigned int
-cfs_hash_full_nbkt(cfs_hash_t *hs)
+cfs_hash_full_nbkt(struct cfs_hash *hs)
 {
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
 }
 
-int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
+int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
 {
        int                 dist[8] = { 0, };
        int                 maxdep  = -1;
 
                ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
 }
 
-static int vvp_pgcache_obj_get(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+static int vvp_pgcache_obj_get(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                               struct hlist_node *hnode, void *data)
 {
        struct vvp_pgcache_id   *id  = data;
 
  * Chapter 6.4.
  * Addison Wesley, 1973
  */
-static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
 {
        int i;
        __u32 result;
        return hlist_entry(hnode, struct pool_desc, pool_hash);
 }
 
-static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void pool_hashrefcount_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct pool_desc *pool;
 
        lov_pool_getref(pool);
 }
 
-static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+static void pool_hashrefcount_put_locked(struct cfs_hash *hs,
                                         struct hlist_node *hnode)
 {
        struct pool_desc *pool;
 
  * The implementation uses a hash table to connect cl_env and thread
  */
 
-static cfs_hash_t *cl_env_hash;
+static struct cfs_hash *cl_env_hash;
 
-static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+static unsigned cl_env_hops_hash(struct cfs_hash *lh,
                                 const void *key, unsigned mask)
 {
 #if BITS_PER_LONG == 64
        return (key == cle->ce_owner);
 }
 
-static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
+static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
 {
        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
        LASSERT(cle->ce_magic == &cl_env_init0);
 
                                    struct obd_uuid *cluuid)
 {
        struct obd_export *export;
-       cfs_hash_t *hash = NULL;
+       struct cfs_hash *hash = NULL;
        int rc = 0;
 
        OBD_ALLOC_PTR(export);
 
 int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
 {
-       cfs_hash_t *nid_hash;
+       struct cfs_hash *nid_hash;
        struct obd_export *doomed_exp = NULL;
        int exports_evicted = 0;
 
 
 int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
 {
-       cfs_hash_t *uuid_hash;
+       struct cfs_hash *uuid_hash;
        struct obd_export *doomed_exp = NULL;
        struct obd_uuid doomed_uuid;
        int exports_evicted = 0;
 
 
 void lprocfs_free_per_client_stats(struct obd_device *obd)
 {
-       cfs_hash_t *hash = obd->obd_nid_stats_hash;
+       struct cfs_hash *hash = obd->obd_nid_stats_hash;
        struct nid_stat *stat;
 
        /* we need an extra list - because hash_exit is called too early */
 }
 EXPORT_SYMBOL(lprocfs_init_ldlm_stats);
 
-int lprocfs_exp_print_uuid(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+int lprocfs_exp_print_uuid(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           struct hlist_node *hnode, void *data)
 
 {
        bool            first;
 };
 
-int lprocfs_exp_print_hash(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+int lprocfs_exp_print_hash(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           struct hlist_node *hnode, void *cb_data)
 
 {
 
        top = o->lo_header;
        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
-               cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
+               struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
                struct cfs_hash_bd bd;
 
                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
                                       const struct lu_object_conf *conf)
 {
        struct lu_object        *o;
-       cfs_hash_t            *hs;
+       struct cfs_hash       *hs;
        struct cfs_hash_bd          bd;
        struct lu_site_bkt_data *bkt;
 
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site  *s;
-       cfs_hash_t          *hs;
+       struct cfs_hash     *hs;
        struct cfs_hash_bd        bd;
        __u64             version = 0;
 
 };
 
 static int
-lu_site_obj_print(cfs_hash_t *hs, struct cfs_hash_bd *bd,
+lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                  struct hlist_node *hnode, void *data)
 {
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        return bits;
 }
 
-static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
                                const void *key, unsigned mask)
 {
        struct lu_fid  *fid = (struct lu_fid *)key;
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
 }
 
-static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lu_object_header *h;
 
        }
 }
 
-static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        LBUG(); /* we should never call it */
 }
        unsigned        lss_busy;
 } lu_site_stats_t;
 
-static void lu_site_stats_get(cfs_hash_t *hs,
+static void lu_site_stats_get(struct cfs_hash *hs,
                              lu_site_stats_t *stats, int populated)
 {
        struct cfs_hash_bd bd;
        struct lu_site_bkt_data *bkt;
        struct lu_object        *shadow;
        wait_queue_t             waiter;
-       cfs_hash_t              *hs;
+       struct cfs_hash         *hs;
        struct cfs_hash_bd       bd;
        __u64                    version = 0;
 
 
  */
 
 static unsigned
-uuid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
                                  sizeof(((struct obd_uuid *)key)->uuid), mask);
 }
 
 static void
-uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct obd_export *exp;
 
 }
 
 static void
-uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+uuid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct obd_export *exp;
 
  */
 
 static unsigned
-nid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+nid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
 }
 }
 
 static void
-nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct obd_export *exp;
 
 }
 
 static void
-nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nid_export_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct obd_export *exp;
 
 }
 
 static void
-nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nid_stat *ns;
 
 }
 
 static void
-nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+nidstats_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct nid_stat *ns;
 
 
  * Hash operations for uid/gid <-> osc_quota_info
  */
 static unsigned
-oqi_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u32_hash(*((__u32*)key), mask);
 }
 }
 
 static void
-oqi_get(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 }
 
 static void
-oqi_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 }
 
 static void
-oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+oqi_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct osc_quota_info *oqi;
 
 
 
 #include "ptlrpc_internal.h"
 
-static cfs_hash_t *conn_hash = NULL;
+static struct cfs_hash *conn_hash = NULL;
 static cfs_hash_ops_t conn_hash_ops;
 
 struct ptlrpc_connection *
  * Hash operations for net_peer<->connection
  */
 static unsigned
-conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
 }
 }
 
 static void
-conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ptlrpc_connection *conn;
 
 }
 
 static void
-conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ptlrpc_connection *conn;
 
 }
 
 static void
-conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+conn_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ptlrpc_connection *conn;