# define DEBUG_SUBSYSTEM S_UNDEFINED
 #endif
 
-#define CDEBUG_DEFAULT_MAX_DELAY (cfs_time_seconds(600))        /* jiffies */
-#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
+#define CDEBUG_DEFAULT_MAX_DELAY (600 * HZ)     /* jiffies */
+#define CDEBUG_DEFAULT_MIN_DELAY ((HZ + 1) / 2) /* jiffies */
 #define CDEBUG_DEFAULT_BACKOFF   2
 struct cfs_debug_limit_state {
        unsigned long   cdls_next;
 
 
 static inline unsigned long cfs_time_shift(int seconds)
 {
-       return cfs_time_add(cfs_time_current(), cfs_time_seconds(seconds));
+       return cfs_time_add(cfs_time_current(), seconds * HZ);
 }
 
 /*
 
        return jiffies;
 }
 
-static inline long cfs_time_seconds(int seconds)
-{
-       return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
-}
-
 static inline long cfs_duration_sec(long d)
 {
        return d / msecs_to_jiffies(MSEC_PER_SEC);
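
The helper removed above is what makes the whole series mechanical: cfs_time_seconds()
multiplied a whole number of seconds by msecs_to_jiffies(MSEC_PER_SEC), and
MSEC_PER_SEC milliseconds is exactly one second, i.e. HZ jiffies, so every call
site can use seconds * HZ directly. A sketch of the identity (illustrative, not
part of the patch):

	/*
	 * cfs_time_seconds(s) == ((long)s) * msecs_to_jiffies(MSEC_PER_SEC)
	 *                     == ((long)s) * HZ
	 *
	 * Caveat: the old helper widened to long before multiplying, while
	 * "s * HZ" is int arithmetic when s is an int; that is why a few
	 * call sites below add an explicit (long) cast.
	 */
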
 static inline u64 cfs_time_shift_64(int seconds)
 {
        return cfs_time_add_64(cfs_time_current_64(),
-                              cfs_time_seconds(seconds));
+                              seconds * HZ);
 }
 
 static inline int cfs_time_before_64(u64 t1, u64 t2)
 
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
                set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) / 100);
+               schedule_timeout(HZ / 100);
 
                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
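
schedule_timeout() takes its argument in jiffies, so the HZ-scaled values feed
it directly; HZ / 100 above asks for roughly 10 ms, though integer division
rounds down (HZ == 250 gives 2 jiffies, i.e. 8 ms). The sleep idiom repeated
throughout the patch, as an illustrative sketch:

	set_current_state(TASK_INTERRUPTIBLE);	/* or TASK_UNINTERRUPTIBLE */
	schedule_timeout(HZ / 100);		/* ~10 ms worth of jiffies */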
 
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(interval);
-               if (interval < cfs_time_seconds(1))
+               if (interval < HZ)
                        interval *= 2;
 
                goto again;
                               "Waiting for %d threads to terminate\n",
                               atomic_read(&kiblnd_data.kib_nthreads));
                        set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout(HZ);
                }
 
                /* fall through */
                               libcfs_nid2str(ni->ni_nid),
                               atomic_read(&net->ibn_npeers));
                        set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout(HZ);
                }
 
                kiblnd_net_fini_pools(net);
 
                add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_unlock_irqrestore(glock, flags);
 
-               rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
-                                                  cfs_time_seconds(1));
+               rc = schedule_timeout(long_sleep ? 10 * HZ : HZ);
                remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_lock_irqsave(glock, flags);
 
 
        switch (conn->ksnc_rx_state) {
        case SOCKNAL_RX_LNET_PAYLOAD:
                last_rcv = conn->ksnc_rx_deadline -
-                          cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
+                          *ksocknal_tunables.ksnd_timeout * HZ;
                CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
                       libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
                       &conn->ksnc_ipaddr, conn->ksnc_port,
                                ksocknal_data.ksnd_nthreads);
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout(HZ);
                        read_lock(&ksocknal_data.ksnd_global_lock);
                }
                read_unlock(&ksocknal_data.ksnd_global_lock);
                       "waiting for %d peers to disconnect\n",
                       net->ksnn_npeers);
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout(HZ);
 
                ksocknal_debug_peerhash(ni);
 
 
 
        if (ksocknal_data.ksnd_stall_tx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+               schedule_timeout(ksocknal_data.ksnd_stall_tx * HZ);
        }
 
        LASSERT(tx->tx_resid);
 
        if (ksocknal_data.ksnd_stall_rx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
+               schedule_timeout(ksocknal_data.ksnd_stall_rx * HZ);
        }
 
        rc = ksocknal_connsock_addref(conn);
        int rc = 0;
 
        deadline = cfs_time_add(cfs_time_current(),
-                               cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+                               *ksocknal_tunables.ksnd_timeout * HZ);
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
                         * so min_reconnectms should be good heuristic
                         */
                        route->ksnr_retry_interval =
-                               cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
+                               *ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000;
                        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                                           route->ksnr_retry_interval);
                }
        route->ksnr_retry_interval *= 2;
        route->ksnr_retry_interval =
                max(route->ksnr_retry_interval,
-                   cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
+                   (long)*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000);
        route->ksnr_retry_interval =
                min(route->ksnr_retry_interval,
-                   cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
+                   (long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000);
 
        LASSERT(route->ksnr_retry_interval);
        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
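
The (long) casts above are not cosmetic: the kernel's min()/max() macros
complain about mismatched operand types, and the reconnectms tunables are
ints while ksnr_retry_interval is a long (as the casts in this hunk imply);
the cast also widens the product before the int multiplication by HZ could
overflow for large tunable values. The clamp under those assumptions, as a
sketch:

	long iv = route->ksnr_retry_interval;

	iv = max(iv, (long)*ksocknal_tunables.ksnd_min_reconnectms * HZ / 1000);
	iv = min(iv, (long)*ksocknal_tunables.ksnd_max_reconnectms * HZ / 1000);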
 
        if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
                /* may run out of resource, retry later */
-               *timeout = cfs_time_seconds(1);
+               *timeout = HZ;
                return 0;
        }
 
        val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
                    SOCKNAL_CONND_TIMEOUT - sec);
 
-       *timeout = (val > 0) ? cfs_time_seconds(val) :
-                              cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
+       *timeout = (val > 0) ? val * HZ : SOCKNAL_CONND_TIMEOUT * HZ;
        if (val > 0)
                return 0;
 
        if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
            time_before(cfs_time_current(),
                        cfs_time_add(peer->ksnp_last_alive,
-                                    cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+                                    *ksocknal_tunables.ksnd_keepalive * HZ)))
                return 0;
 
        if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
                                             ksocknal_data.ksnd_peer_hash_size;
                        }
 
-                       deadline = cfs_time_add(deadline, cfs_time_seconds(p));
+                       deadline = cfs_time_add(deadline, p * HZ);
                }
 
                if (nenomem_conns) {
 
        if (rc)
                return -EINVAL;
 
-       d = cfs_time_seconds(sec) / 100;
+       d = sec * HZ / 100;
        if (d < min || d > max)
                return -EINVAL;
 
 
                CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
                       id, ms);
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ms) / 1000);
+               schedule_timeout(ms * HZ / 1000);
                CERROR("cfs_fail_timeout id %x awake\n", id);
        }
        return ret;
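
In the millisecond-based conversions here, multiplying before dividing
matters: ms * HZ / 1000 matches the old cfs_time_seconds(ms) / 1000 and keeps
sub-second precision on any HZ, whereas dividing first would truncate.
Illustrative arithmetic only:

	/* HZ == 250, ms == 40:
	 *   ms * HZ / 1000   = 40 * 250 / 1000 = 10 jiffies  (40 ms, correct)
	 *   ms * (HZ / 1000) = 40 * 0          = 0 jiffies   (truncated away)
	 */

msecs_to_jiffies(ms) is the conventional helper for this; it rounds up where
the open-coded form rounds down, so the patch keeps the arithmetic it
replaces.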
 
 
                if (cfs_time_after(cfs_time_current(),
                                   cdls->cdls_next + libcfs_console_max_delay +
-                                  cfs_time_seconds(10))) {
+                                  10 * HZ)) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout(HZ);
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
 
                        if (rc != -EAGAIN) {
                                CWARN("Accept error %d: pausing...\n", rc);
                                set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(1));
+                               schedule_timeout(HZ);
                        }
                        continue;
                }
 
        while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
                CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout(HZ);
        }
 
        cfs_restore_sigs(blocked);
                                       libcfs_nid2str(ni->ni_nid));
                        }
                        set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout(HZ);
                        lnet_net_lock(LNET_LOCK_EX);
                        continue;
                }
 
                return 0;
 
        deadline = cfs_time_add(lp->lp_last_alive,
-                               cfs_time_seconds(lp->lp_ni->ni_peertimeout));
+                               lp->lp_ni->ni_peertimeout * HZ);
        alive = cfs_time_after(deadline, now);
 
        /* Update obsolete lp_alive except for routers assumed to be dead
 
                unsigned long next_query =
                           cfs_time_add(lp->lp_last_query,
-                                       cfs_time_seconds(lnet_queryinterval));
+                                       lnet_queryinterval * HZ);
 
                if (time_before(now, next_query)) {
                        if (lp->lp_alive)
 
                                rule->dr_time_base = now;
 
                        rule->dr_drop_time = rule->dr_time_base +
-                               cfs_time_seconds(
-                                       prandom_u32_max(attr->u.drop.da_interval));
-                       rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval);
+                               prandom_u32_max(attr->u.drop.da_interval) * HZ;
+                       rule->dr_time_base += attr->u.drop.da_interval * HZ;
 
                        CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n",
                               libcfs_nid2str(attr->fa_src),
 static unsigned long
 round_timeout(unsigned long timeout)
 {
-       return cfs_time_seconds((unsigned int)
-                       cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
+       return rounddown(timeout, HZ) + HZ;
 }
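
The rewrite of round_timeout() is value-preserving: the old code floored
jiffies to whole seconds (cfs_duration_sec() divides by HZ), added one
second, and scaled back up, which is exactly rounddown(timeout, HZ) + HZ;
keeping the arithmetic in unsigned long also avoids truncating 64-bit
jiffies. A worked example, assuming HZ == 1000 (illustrative only):

	/* timeout == 1234: old (1234/1000 + 1) * 1000     = 2000
	 *                  new rounddown(1234, 1000) + 1000 = 2000
	 * exact multiples still round up: 3000 -> 4000 either way
	 */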
 
 static void
                                rule->dl_time_base = now;
 
                        rule->dl_delay_time = rule->dl_time_base +
-                               cfs_time_seconds(
-                                       prandom_u32_max(
-                                               attr->u.delay.la_interval));
-                       rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval);
+                               prandom_u32_max(attr->u.delay.la_interval) * HZ;
+                       rule->dl_time_base += attr->u.delay.la_interval * HZ;
 
                        CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n",
                               libcfs_nid2str(attr->fa_src),
 
                               ptable->pt_zombies);
                }
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) >> 1);
+               schedule_timeout(HZ >> 1);
                lnet_net_lock(cpt_locked);
        }
 }
 
                        return;
 
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout(HZ);
        }
 }
 
 
        if (secs && !rtr->lp_ping_notsent &&
            cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
-                                            cfs_time_seconds(secs)))) {
+                                            secs * HZ))) {
                int rc;
                struct lnet_process_id id;
                struct lnet_handle_md mdh;
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                       "Waiting for rc buffers to unlink\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) / 4);
+               schedule_timeout(HZ / 4);
 
                lnet_net_lock(LNET_LOCK_EX);
        }
                else
                        wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
                                                         false,
-                                                        cfs_time_seconds(1));
+                                                        HZ);
        }
 
        lnet_prune_rc_data(1); /* wait for UNLINK */
 
 
        rc = wait_event_interruptible_timeout(trans->tas_waitq,
                                              lstcon_rpc_trans_check(trans),
-                                             cfs_time_seconds(timeout));
+                                             timeout * HZ);
        rc = (rc > 0) ? 0 : ((rc < 0) ? -EINTR : -ETIMEDOUT);
 
        mutex_lock(&console_session.ses_mutex);
 
                CWARN("Session is shutting down, waiting for termination of transactions\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout(HZ);
 
                mutex_lock(&console_session.ses_mutex);
        }
 
 
        /* 1 second pause to avoid timestamp reuse */
        set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(cfs_time_seconds(1));
+       schedule_timeout(HZ);
        srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;
 
        srpc_data.rpc_state = SRPC_STATE_NONE;
 
 #define selftest_wait_events()                                 \
        do {                                                    \
                set_current_state(TASK_UNINTERRUPTIBLE);        \
-               schedule_timeout(cfs_time_seconds(1) / 10);     \
+               schedule_timeout(HZ / 10);                      \
        } while (0)
 
 #define lst_wait_until(cond, lock, fmt, ...)                           \
 
 
                rc = wait_event_timeout(stt_data.stt_waitq,
                                        stt_data.stt_shuttingdown,
-                                       cfs_time_seconds(STTIMER_SLOTTIME));
+                                       STTIMER_SLOTTIME * HZ);
        }
 
        spin_lock(&stt_data.stt_lock);
 
 #define OBD_LDLM_DEVICENAME  "ldlm"
 
 #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
+#define LDLM_DEFAULT_MAX_ALIVE (65 * 60 * HZ) /* 65 min */
 #define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
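
For reference, the spelled-out constant matches the old value exactly:
65 min * 60 s/min = 3900 s, so 65 * 60 * HZ is the same jiffies count as the
old cfs_time_seconds(3900), and the "/* 65 min */" comment now documents
arithmetic the reader can check at a glance.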
 
 /**
 
         */
        while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
                mutex_unlock(&lck->rpcl_mutex);
-               schedule_timeout(cfs_time_seconds(1) / 4);
+               schedule_timeout(HZ / 4);
                goto again;
        }
 
 
 {
        if (req->rq_delay_limit != 0 &&
            time_before(cfs_time_add(req->rq_queued_time,
-                                    cfs_time_seconds(req->rq_delay_limit)),
+                                    req->rq_delay_limit * HZ),
                        cfs_time_current())) {
                return 1;
        }
 
                                }
                        }
 
-                       lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
+                       lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ,
                                               NULL, LWI_ON_SIGNAL_NOOP, NULL);
 
                        /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
 
        LDLM_DEBUG(lock, "client completion callback handler START");
 
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
-               int to = cfs_time_seconds(1);
+               int to = HZ;
 
                while (to > 0) {
                        set_current_state(TASK_INTERRUPTIBLE);
            !lock->l_readers && !lock->l_writers &&
            cfs_time_after(cfs_time_current(),
                           cfs_time_add(lock->l_last_used,
-                                       cfs_time_seconds(10)))) {
+                                       10 * HZ))) {
                unlock_res_and_lock(lock);
                if (ldlm_bl_to_thread_lock(ns, NULL, lock))
                        ldlm_handle_bl_callback(ns, NULL, lock);
 
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
-               lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
+               lwi = LWI_TIMEOUT(c_time * HZ,
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
 
                LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
-               lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+               lwi = LWI_TIMEOUT_INTR(timeout * HZ,
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }
 
                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
                        if (lock->l_flags & LDLM_FL_FAIL_LOC) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(4));
+                               schedule_timeout(4 * HZ);
                                set_current_state(TASK_RUNNING);
                        }
                        if (lock->l_completion_ast)
 
         * to decrement mnt_cnt and hope to finish it within 10sec.
         */
        init_waitqueue_head(&waitq);
-       lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(10),
-                                  cfs_time_seconds(1), NULL, NULL);
+       lwi = LWI_TIMEOUT_INTERVAL(10 * HZ, HZ, NULL, NULL);
        l_wait_event(waitq, may_umount(sbi->ll_mnt.mnt), &lwi);
 
        schedule();
 
                spin_lock(&lli->lli_sa_lock);
                sai->sai_index_wait = entry->se_index;
                spin_unlock(&lli->lli_sa_lock);
-               lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
+               lwi = LWI_TIMEOUT_INTR(30 * HZ, NULL,
                                       LWI_ON_SIGNAL_NOOP, NULL);
                rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
                if (rc < 0) {
 
        mutex_unlock(&lov->lov_lock);
 
        init_waitqueue_head(&waitq);
-       lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
-                                  cfs_time_seconds(1), NULL, NULL);
+       lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ, HZ, NULL, NULL);
 
        rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi);
        if (tgt->ltd_active)
 
                               exp->exp_obd->obd_name, -EIO);
                        return -EIO;
                }
-               lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
+               lwi = LWI_TIMEOUT_INTR(resends * HZ, NULL, NULL,
                                       NULL);
                l_wait_event(waitq, 0, &lwi);
 
 
 
                if (rcl == -ESHUTDOWN &&
                    atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
-                       int secs = cfs_time_seconds(obd_timeout);
+                       int secs = obd_timeout * HZ;
                        struct obd_import *imp;
                        struct l_wait_info lwi;
 
 
 int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
                    long timeout)
 {
-       struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+       struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout * HZ,
                                                  NULL, NULL, NULL);
        int rc;
 
 
                spin_unlock(&ec->ec_lock);
                CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout(HZ);
                lu_site_purge(env, ed->ed_site, -1);
                spin_lock(&ec->ec_lock);
        }
 
                           enum osc_extent_state state)
 {
        struct osc_object *obj = ext->oe_obj;
-       struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
+       struct l_wait_info lwi = LWI_TIMEOUT_INTR(600 * HZ, NULL,
                                                  LWI_ON_SIGNAL_NOOP, NULL);
        int rc = 0;
 
        struct l_wait_info lwi;
        int rc = -EDQUOT;
 
-       lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(AT_OFF ? obd_timeout : at_max),
+       lwi = LWI_TIMEOUT_INTR((AT_OFF ? obd_timeout : at_max) * HZ,
                               NULL, LWI_ON_SIGNAL_NOOP, NULL);
 
        OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
 
         * ll_file_is_contended.
         */
        retry_time = cfs_time_add(obj->oo_contention_time,
-                                 cfs_time_seconds(osc_contention_time));
+                                 osc_contention_time * HZ);
        if (cfs_time_after(cur_time, retry_time)) {
                osc_object_clear_contended(obj);
                return 0;
 
                         * fail_loc
                         */
                        set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(2));
+                       schedule_timeout(2 * HZ);
                        set_current_state(TASK_RUNNING);
                }
        }
                         * We still want to block for a limited time,
                         * so we allow interrupts during the timeout.
                         */
-                       lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
+                       lwi = LWI_TIMEOUT_INTR_ALL(HZ,
                                                   ptlrpc_expired_set,
                                                   ptlrpc_interrupted_set, set);
                else
                         * interrupts are allowed. Wait until all
                         * complete, or an in-flight req times out.
                         */
-                       lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+                       lwi = LWI_TIMEOUT((timeout ? timeout : 1) * HZ,
                                          ptlrpc_expired_set, set);
 
                rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
                 * Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs
                 */
-               lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
-                                          cfs_time_seconds(1), NULL, NULL);
+               lwi = LWI_TIMEOUT_INTERVAL(LONG_UNLINK * HZ, HZ, NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
                                  &lwi);
                if (rc == 0) {
 
 
                        /* Wait for a bit */
                        init_waitqueue_head(&waitq);
-                       lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
+                       lwi = LWI_TIMEOUT(2 * HZ, NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
 
                 * have been locally cancelled by ptlrpc_abort_inflight.
                 */
                lwi = LWI_TIMEOUT_INTERVAL(
-                       cfs_timeout_cap(cfs_time_seconds(timeout)),
-                       (timeout > 1) ? cfs_time_seconds(1) :
-                       cfs_time_seconds(1) / 2,
+                       cfs_timeout_cap(timeout * HZ),
+                       (timeout > 1) ? HZ : HZ / 2,
                        NULL, NULL);
                rc = l_wait_event(imp->imp_recovery_waitq,
                                  (atomic_read(&imp->imp_inflight) == 0),
 int ptlrpc_reconnect_import(struct obd_import *imp)
 {
        struct l_wait_info lwi;
-       int secs = cfs_time_seconds(obd_timeout);
+       int secs = obd_timeout * HZ;
        int rc;
 
        ptlrpc_pinger_force(imp);
 
                if (AT_OFF) {
                        if (imp->imp_server_timeout)
-                               timeout = cfs_time_seconds(obd_timeout / 2);
+                               timeout = obd_timeout * HZ / 2;
                        else
-                               timeout = cfs_time_seconds(obd_timeout);
+                               timeout = obd_timeout * HZ;
                } else {
                        int idx = import_at_get_index(imp,
                                imp->imp_client->cli_request_portal);
-                       timeout = cfs_time_seconds(
-                               at_get(&imp->imp_at.iat_service_estimate[idx]));
+                       timeout = at_get(&imp->imp_at.iat_service_estimate[idx]) * HZ;
                }
 
                lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
 
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish LNDs
                 */
-               lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
-                                          cfs_time_seconds(1), NULL, NULL);
+               lwi = LWI_TIMEOUT_INTERVAL(LONG_UNLINK * HZ, HZ, NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
                if (rc == 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
 
                /* If we cannot get anything for some long time, we better
                 * bail out instead of waiting infinitely
                 */
-               lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
+               lwi = LWI_TIMEOUT(10 * HZ, NULL, NULL);
                rc = l_wait_event(svcpt->scp_rep_waitq,
                                  !list_empty(&svcpt->scp_rep_idle), &lwi);
                if (rc != 0)
 
        }
        mutex_unlock(&pinger_mutex);
 
-       return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
+       return cfs_time_sub(cfs_time_add(time, timeout * HZ),
                                         cfs_time_current());
 }
 
                        if (imp->imp_pingable && imp->imp_next_ping &&
                            cfs_time_after(imp->imp_next_ping,
                                           cfs_time_add(this_ping,
-                                                       cfs_time_seconds(PING_INTERVAL))))
+                                                       PING_INTERVAL * HZ)))
                                ptlrpc_update_next_ping(imp, 0);
                }
                mutex_unlock(&pinger_mutex);
                CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n",
                       time_to_next_wake,
                       cfs_time_add(this_ping,
-                                   cfs_time_seconds(PING_INTERVAL)));
+                                   PING_INTERVAL * HZ));
                if (time_to_next_wake > 0) {
                        lwi = LWI_TIMEOUT(max_t(long, time_to_next_wake,
-                                               cfs_time_seconds(1)),
+                                               HZ),
                                          NULL, NULL);
                        l_wait_event(thread->t_ctl_waitq,
                                     thread_is_stopping(thread) ||
 
 
        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
-               struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
+               struct l_wait_info lwi = LWI_TIMEOUT(5 * HZ,
                                                     back_to_sleep, NULL);
 
                req->rq_invalid_rqset = 0;
                int timeout;
 
                timeout = ptlrpc_set_next_timeout(set);
-               lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+               lwi = LWI_TIMEOUT((timeout ? timeout : 1) * HZ,
                                  ptlrpc_expired_set, set);
 
                lu_context_enter(&env.le_ctx);
 
 
        if (!async) {
                struct l_wait_info lwi;
-               int secs = cfs_time_seconds(obd_timeout);
+               int secs = obd_timeout * HZ;
 
                CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
                       obd2cli_tgt(imp->imp_obd), secs);
 
                         * Wait for a timeout (unless something else
                         * happens) before I try again
                         */
-                       svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
+                       svcpt->scp_rqbd_timeout = HZ / 10;
                        CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
                               svcpt->scp_nrqbds_posted);
                }
 {
        while (1) {
                int rc;
-               struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
+               struct l_wait_info lwi = LWI_TIMEOUT(10 * HZ,
                                                     NULL, NULL);
 
                rc = l_wait_event(svcpt->scp_waitq,
                         * of sluggish LNDs
                         */
                        lwi = LWI_TIMEOUT_INTERVAL(
-                                       cfs_time_seconds(LONG_UNLINK),
-                                       cfs_time_seconds(1), NULL, NULL);
+                                       LONG_UNLINK * HZ, HZ, NULL, NULL);
                        rc = l_wait_event(svcpt->scp_waitq,
                                          svcpt->scp_nrqbds_posted == 0, &lwi);
                        if (rc == -ETIMEDOUT) {