Unneeded casts prevent the compiler from performing valuable checks.
This is especially true for function pointers.
Remove these casts to avoid silently introducing bugs when a variable's
type is changed in the future.
No change in generated code.
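
For illustration, a minimal sketch (simplified, hypothetical types, not
the driver's actual definitions) of how a "(void *)" cast on a function
pointer hides a signature mismatch that the compiler would otherwise
flag:

    struct device;
    struct aead_request;

    struct cc_crypto_req {
            void (*user_cb)(struct device *dev, void *req, int err);
            void *user_arg;
    };

    /* Suppose the callback is later changed to take the request directly: */
    static void my_complete(struct device *dev, struct aead_request *req, int err)
    {
    }

    void setup(struct cc_crypto_req *cc_req, struct aead_request *req)
    {
            cc_req->user_cb = (void *)my_complete;  /* cast: mismatch compiles silently */
            cc_req->user_arg = req;                 /* void * member: no cast needed */
            /*
             * Without the cast, the assignment is flagged by
             * -Wincompatible-pointer-types, so the stale callback
             * signature is caught at build time.
             */
    }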
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
                if (!key)
                        return -ENOMEM;
 
-               key_dma_addr = dma_map_single(dev, (void *)key, keylen,
-                                             DMA_TO_DEVICE);
+               key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
        }
 
        /* Setup request structure */
-       cc_req.user_cb = (void *)cc_aead_complete;
-       cc_req.user_arg = (void *)req;
+       cc_req.user_cb = cc_aead_complete;
+       cc_req.user_arg = req;
 
        /* Setup request context */
        areq_ctx->gen_ctx.op_type = direct;
 
        u32 nents;
 
        nents = sg_nents_for_len(sg, end);
-       sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+       sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
                       (direct == CC_SG_TO_BUF));
 }
 
 
        /* Map IV buffer */
        if (ivsize) {
-               dump_byte_array("iv", (u8 *)info, ivsize);
+               dump_byte_array("iv", info, ivsize);
                req_ctx->gen_ctx.iv_dma_addr =
-                       dma_map_single(dev, (void *)info,
-                                      ivsize, DMA_BIDIRECTIONAL);
+                       dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
                        dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
                                ivsize, info);
 
                ctx_p->user.key);
 
        /* Map key buffer */
-       ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+       ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key,
                                                  max_key_buf_size,
                                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
 
        dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n",
                ctx_p, crypto_tfm_alg_name(tfm), keylen);
-       dump_byte_array("key", (u8 *)key, keylen);
+       dump_byte_array("key", key, keylen);
 
        /* STAT_PHASE_0: Init and sanity checks */
 
 
        dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
                ctx_p, crypto_tfm_alg_name(tfm), keylen);
-       dump_byte_array("key", (u8 *)key, keylen);
+       dump_byte_array("key", key, keylen);
 
        /* STAT_PHASE_0: Init and sanity checks */
 
        }
 
        /* Setup request structure */
-       cc_req.user_cb = (void *)cc_cipher_complete;
-       cc_req.user_arg = (void *)req;
+       cc_req.user_cb = cc_cipher_complete;
+       cc_req.user_arg = req;
 
        /* Setup CPP operation details */
        if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
 
                        return -ENOMEM;
 
                ctx->key_params.key_dma_addr =
-                       dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+                       dma_map_single(dev, ctx->key_params.key, keylen,
                                       DMA_TO_DEVICE);
                if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
        ctx->key_params.keylen = 0;
 
        ctx->digest_buff_dma_addr =
-               dma_map_single(dev, (void *)ctx->digest_buff,
-                              sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+               dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
+                              DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
                dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
                        sizeof(ctx->digest_buff), ctx->digest_buff);
                &ctx->digest_buff_dma_addr);
 
        ctx->opad_tmp_keys_dma_addr =
-               dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+               dma_map_single(dev, ctx->opad_tmp_keys_buff,
                               sizeof(ctx->opad_tmp_keys_buff),
                               DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
        idx++;
 
        /* Setup request structure */
-       cc_req.user_cb = (void *)cc_update_complete;
-       cc_req.user_arg = (void *)req;
+       cc_req.user_cb = cc_update_complete;
+       cc_req.user_arg = req;
 
        rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
        if (rc != -EINPROGRESS && rc != -EBUSY) {
        }
 
        /* Setup request structure */
-       cc_req.user_cb = (void *)cc_hash_complete;
-       cc_req.user_arg = (void *)req;
+       cc_req.user_cb = cc_hash_complete;
+       cc_req.user_arg = req;
 
        if (state->xcbc_count && rem_cnt == 0) {
                /* Load key for ECB decryption */
        }
 
        /* Setup request structure */
-       cc_req.user_cb = (void *)cc_hash_complete;
-       cc_req.user_arg = (void *)req;
+       cc_req.user_cb = cc_hash_complete;
+       cc_req.user_arg = req;
 
        if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
                key_len = CC_AES_128_BIT_KEY_SIZE;
        }
 
        /* Setup request structure */
-       cc_req.user_cb = (void *)cc_digest_complete;
-       cc_req.user_arg = (void *)req;
+       cc_req.user_cb = cc_digest_complete;
+       cc_req.user_arg = req;
 
        if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
                key_len = CC_AES_128_BIT_KEY_SIZE;