Make set_memory_x/set_memory_nx directly aware of if NX is supported
in the system or not, rather than requiring that every caller assesses
that support independently.
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tim Starling <tstarling@wikimedia.org>
Cc: Hannes Eder <hannes@hanneseder.net>
LKML-Reference: <1258154897-6770-4-git-send-email-hpa@zytor.com>
Acked-by: Kees Cook <kees.cook@canonical.com>
 
 {
        int error;
 
-       if (nx_enabled)
-               set_pages_x(image->control_code_page, 1);
+       set_pages_x(image->control_code_page, 1);
        error = machine_kexec_alloc_page_tables(image);
        if (error)
                return error;
  */
 void machine_kexec_cleanup(struct kimage *image)
 {
-       if (nx_enabled)
-               set_pages_nx(image->control_code_page, 1);
+       set_pages_nx(image->control_code_page, 1);
        machine_kexec_free_page_tables(image);
 }
 
 
 
 int set_memory_x(unsigned long addr, int numpages)
 {
+       if (!(__supported_pte_mask & _PAGE_NX))
+               return 0;
+
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
 }
 EXPORT_SYMBOL(set_memory_x);
 
 int set_memory_nx(unsigned long addr, int numpages)
 {
+       if (!(__supported_pte_mask & _PAGE_NX))
+               return 0;
+
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
 }
 EXPORT_SYMBOL(set_memory_nx);