/* If another context is idling then defer */
        if (engine->idling) {
-               kthread_queue_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }
 
 
                /* Only do teardown in the thread */
                if (!in_kthread) {
-                       kthread_queue_work(&engine->kworker,
+                       kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }
        ret = ablkcipher_enqueue_request(&engine->queue, req);
 
        if (!engine->busy && need_pump)
-               kthread_queue_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(engine->kworker, &engine->pump_requests);
 
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
        ret = ahash_enqueue_request(&engine->queue, req);
 
        if (!engine->busy && need_pump)
-               kthread_queue_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(engine->kworker, &engine->pump_requests);
 
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
 
        req->base.complete(&req->base, err);
 
-       kthread_queue_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
 
        req->base.complete(&req->base, err);
 
-       kthread_queue_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-       kthread_queue_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(engine->kworker, &engine->pump_requests);
 
        return 0;
 }
        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);
 
-       kthread_init_worker(&engine->kworker);
-       engine->kworker_task = kthread_run(kthread_worker_fn,
-                                          &engine->kworker, "%s",
-                                          engine->name);
-       if (IS_ERR(engine->kworker_task)) {
+       engine->kworker = kthread_create_worker(0, "%s", engine->name);
+       if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
 
        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
-               sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+               sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }
 
        return engine;
        if (ret)
                return ret;
 
-       kthread_flush_worker(&engine->kworker);
-       kthread_stop(engine->kworker_task);
+       kthread_destroy_worker(engine->kworker);
 
        return 0;
 }
 
  * @prepare_hash_request: do some prepare if need before handle the current request
  * @unprepare_hash_request: undo any work done by prepare_hash_request()
  * @hash_one_request: do hash for current request
- * @kworker: thread struct for request pump
- * @kworker_task: pointer to task for request pump kworker thread
+ * @kworker: kthread worker struct for request pump
  * @pump_requests: work struct for scheduling work to the request pump
  * @priv_data: the engine private data
  * @cur_req: the current request which is on processing
        int (*hash_one_request)(struct crypto_engine *engine,
                                struct ahash_request *req);
 
-       struct kthread_worker           kworker;
-       struct task_struct              *kworker_task;
+       struct kthread_worker           *kworker;
        struct kthread_work             pump_requests;
 
        void                            *priv_data;