        struct mutex tx_lock; /* lock to protect Tx buffer */
        void *rx_buffer;
        void *tx_buffer;
+       bool mem_ops_native;
 };
 
 static struct ffa_drv_info *drv_info;
        return 0;
 }
 
+static void ffa_set_up_mem_ops_native_flag(void)
+{
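+       /*
+        * ffa_features() returns 0 when the queried function is reported
+        * as supported by the firmware, so if either native (64-bit)
+        * memory interface is implemented, enable the native memory ops.
+        */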
+       if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
+           !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
+               drv_info->mem_ops_native = true;
+}
+
 static u32 ffa_api_version_get(void)
 {
        return drv_info->version;
 static int
 ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
 {
-       if (dev->mode_32bit)
-               return ffa_memory_ops(FFA_MEM_SHARE, args);
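+       /* Prefer the native (64-bit) ABI when the firmware supports it */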
+       if (drv_info->mem_ops_native)
+               return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
 
-       return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+       return ffa_memory_ops(FFA_MEM_SHARE, args);
 }
 
 static int
         * however on systems without a hypervisor the responsibility
         * falls to the calling kernel driver to prevent access.
         */
-       if (dev->mode_32bit)
-               return ffa_memory_ops(FFA_MEM_LEND, args);
+       if (drv_info->mem_ops_native)
+               return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
 
-       return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+       return ffa_memory_ops(FFA_MEM_LEND, args);
 }
 
 static const struct ffa_dev_ops ffa_ops = {
 
        ffa_setup_partitions();
 
+       ffa_set_up_mem_ops_native_flag();
+
        return 0;
 free_pages:
        if (drv_info->tx_buffer)