 #ifndef __MIPS_ASM_MIPS_CM_H__
 #define __MIPS_ASM_MIPS_CM_H__
 
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
 
 #define CM_GCR_REV_MINOR                       GENMASK(7, 0)
 
 #define CM_ENCODE_REV(major, minor) \
-               (((major) << __ffs(CM_GCR_REV_MAJOR)) | \
-                ((minor) << __ffs(CM_GCR_REV_MINOR)))
+               (FIELD_PREP(CM_GCR_REV_MAJOR, major) | \
+                FIELD_PREP(CM_GCR_REV_MINOR, minor))
 
 #define CM_REV_CM2                             CM_ENCODE_REV(6, 0)
 #define CM_REV_CM2_5                           CM_ENCODE_REV(7, 0)
 static inline unsigned int mips_cm_max_vp_width(void)
 {
        extern int smp_num_siblings;
-       uint32_t cfg;
 
        if (mips_cm_revision() >= CM_REV_CM3)
-               return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW;
+               return FIELD_GET(CM_GCR_SYS_CONFIG2_MAXVPW,
+                                read_gcr_sys_config2());
 
        if (mips_cm_present()) {
                /*
                 * number of VP(E)s, and if that ever changes then this will
                 * need revisiting.
                 */
-               cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE;
-               return (cfg >> __ffs(CM_GCR_Cx_CONFIG_PVPE)) + 1;
+               return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_cl_config()) + 1;
        }
 
        if (IS_ENABLED(CONFIG_SMP))
 
        phys_addr_t addr;
 
        /* L2-only sync was introduced with CM major revision 6 */
-       major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >>
-               __ffs(CM_GCR_REV_MAJOR);
+       major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev());
        if (major_rev < 6)
                return;
 
        preempt_disable();
 
        if (cm_rev >= CM_REV_CM3) {
-               val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
-               val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);
+               val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
+                     FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
 
                if (cm_rev >= CM_REV_CM3_5) {
                        val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
-                       val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
-                       val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
+                       val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
+                       val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
                } else {
                        WARN_ON(cluster != 0);
                        WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
                                  per_cpu(cm_core_lock_flags, curr_core));
 
-               val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM);
+               val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core);
        }
 
        write_gcr_cl_other(val);
        cm_other = read_gcr_error_mult();
 
        if (revision < CM_REV_CM3) { /* CM2 */
-               cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE);
-               ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+               cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+               ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
 
                if (!cause)
                        return;
                ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
                ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
 
-               cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE);
-               ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+               cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+               ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
 
                if (!cause)
                        return;