/*
* SPDX-License-Identifier: GPL-2.0-or-later
- * Host specific cpu indentification for x86.
+ * Host specific cpu identification for x86.
*/
#ifndef HOST_CPUINFO_H
*
* Returns an array of 128 routes, one for each device,
* based on device location.
- * The main goal is to equaly distribute the interrupts
+ * The main goal is to equally distribute the interrupts
* over the 4 existing ACPI links (works only for i440fx).
* The hash function is (slot + pin) & 3 -> "LNK[D|A|B|C]".
*
}
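/*
 * Editorial sketch, not the function from this file: it only illustrates the
 * hash described above, mapping (slot + pin) & 3 onto the four ACPI links,
 * with index 0 selecting LNKD as the "LNK[D|A|B|C]" notation implies. The
 * name below is hypothetical.
 */
static inline const char *pirq_link_name(int slot, int pin)
{
    static const char *const links[4] = { "LNKD", "LNKA", "LNKB", "LNKC" };

    return links[(slot + pin) & 3];
}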
/*
- * Insert DMAR scope for PCI bridges and endpoint devcie
+ * Insert DMAR scope for PCI bridges and endpoint devices
*/
static void
insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque)
pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
PCI_STATUS_SIG_TARGET_ABORT);
}
-/* log an illegal comand event
+/* log an illegal command event
* @addr : address of illegal command
*/
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
break;
case AMDVI_MMIO_COMMAND_BASE:
amdvi_mmio_reg_write(s, size, val, addr);
- /* FIXME - make sure System Software has finished writing incase
+ /* FIXME - make sure System Software has finished writing in case
- * it writes in chucks less than 8 bytes in a robust way.As for
- * now, this hacks works for the linux driver
+ * it writes in chunks less than 8 bytes in a robust way. As for
+ * now, this hack works for the Linux driver
*/
/*
- * PCI bus number (or SID) is not reliable since the device is usaully
+ * PCI bus number (or SID) is not reliable since the device is usually
- * initalized before guest can configure the PCI bridge
+ * initialized before guest can configure the PCI bridge
* (SECONDARY_BUS_NUMBER).
*/
struct vtd_as_key {
* """
*
* We enable per as memory region (iommu_ir_fault) for catching
- * the tranlsation for interrupt range through PASID + PT.
+ * the translation for interrupt range through PASID + PT.
*/
if (pt && as->pasid != PCI_NO_PASID) {
memory_region_set_enabled(&as->iommu_ir_fault, true);
/*
* This matches the barrier in copy_to_ring() (or the guest's
- * equivalent) betweem writing the data to the ring and updating
+ * equivalent) between writing the data to the ring and updating
* rsp_prod. It protects against the pathological case (which
* again I think never happened except on Alpha) where our
* subsequent writes to the ring could *cross* the read of
/*
* If we already wrote this node, refer to the previous copy.
* There's no rename/move in XenStore, so all we need to find
- * it is the tx_id of the transation in which it exists. Which
+ * it is the tx_id of the transaction in which it exists. Which
* may be the root tx.
*/
if (n->serialized_tx != XBT_NULL) {
return 0xffffffffffffffffULL;
}
-/* MSDOS compatibility mode FPU exception support */
+/* MS-DOS compatibility mode FPU exception support */
static void ioportF0_write(void *opaque, hwaddr addr, uint64_t data,
unsigned size)
{
if (value > 16 * MiB) {
error_setg(errp,
"User specified max allowed firmware size %" PRIu64 " is "
- "greater than 16MiB. If combined firwmare size exceeds "
+ "greater than 16MiB. If combined firmware size exceeds "
"16MiB the system may not boot, or experience intermittent"
"stability issues.",
value);
*
* This code should be compatible with AMD's "Extended Method" described at:
* AMD CPUID Specification (Publication #25481)
- * Section 3: Multiple Core Calcuation
+ * Section 3: Multiple Core Calculation
* as long as:
* nr_threads is set to 1;
* OFFSET_IDX is assumed to be 0;
return name;
}
-/* Compatibily hack to maintain legacy +-feat semantic,
+/* Compatibility hack to maintain legacy +-feat semantic,
* where +-feat overwrites any feature set by
- * feat=on|feat even if the later is parsed after +-feat
+ * feat=on|off even if the latter is parsed after +-feat
* (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
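/*
 * Editorial sketch of the ordering rule above (hypothetical names, not the
 * parser in this file): "+feat"/"-feat" entries are applied after the regular
 * feat=on|off properties, so they win regardless of command-line order.
 */
static void apply_legacy_plus_minus(bool *feats, const bool *plus,
                                    const bool *minus, int n)
{
    for (int i = 0; i < n; i++) {
        if (plus[i]) {
            feats[i] = true;    /* "+feat" overrides an earlier feat=off */
        }
        if (minus[i]) {
            feats[i] = false;   /* "-feat" overrides an earlier feat=on */
        }
    }
}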
- * The initial value of xcr0 and ebx == 0, On host without kvm
- * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0
- * even through guest update xcr0, this will crash some legacy guest
- * (e.g., CentOS 6), So set ebx == ecx to workaroud it.
+ * The initial value of xcr0 and ebx == 0. On hosts without kvm
+ * commit 412a3c41 (e.g., CentOS 6), ebx's value is always == 0
+ * even though the guest updates xcr0, which will crash some legacy
+ * guests (e.g., CentOS 6), so set ebx == ecx to work around it.
*/
*ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0, false);
} else if (count == 1) {
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW (1U << 31)
-/* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
CPUID_EXT2_DE | CPUID_EXT2_PSE | \
CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
MemTxAttrs *attrs);
int cpu_get_pic_interrupt(CPUX86State *s);
-/* MSDOS compatibility mode FPU exception support */
+/* MS-DOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
/*
* Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
* root operation upon vCPU reset. kvm_put_msr_feature_control() should also
- * preceed kvm_put_nested_state() when 'real' nested state is set.
+ * precede kvm_put_nested_state() when 'real' nested state is set.
*/
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
}
/*
- * Handled untranslated compatibilty format interrupt with
+ * Handle untranslated compatibility format interrupt with
* extended destination ID in the low bits 11-5. */
dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
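/*
 * Editorial sketch of the bit layout referenced above (an assumption about
 * the KVM extended-destination-ID convention, not QEMU's actual helper):
 * MSI address bits 19:12 carry destination ID bits 7:0 and address bits
 * 11:5 carry destination ID bits 14:8.
 */
static inline uint32_t msi_ext_dest_id_sketch(uint64_t addr)
{
    uint32_t lo = (addr >> 12) & 0xff;  /* classic xAPIC destination bits */
    uint32_t hi = (addr >> 5) & 0x7f;   /* extended destination bits */

    return lo | (hi << 8);
}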
#define MILLISECS(_ms) ((int64_t)((_ms) * 1000000ULL))
#define MICROSECS(_us) ((int64_t)((_us) * 1000ULL))
#define STIME_MAX ((time_t)((int64_t)~0ull >> 1))
-/* Chosen so (NOW() + delta) wont overflow without an uptime of 200 years */
+/* Chosen so (NOW() + delta) won't overflow without an uptime of 200 years */
#define STIME_DELTA_MAX ((int64_t)((uint64_t)~0ull >> 2))
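/*
 * Editorial note on the margin above: (~0ull >> 2) is 2^62 ns, roughly 146
 * years, while INT64_MAX ns is roughly 292 years, so NOW() + STIME_DELTA_MAX
 * cannot overflow until uptime itself approaches ~146 years.
 */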
static int vcpuop_set_periodic_timer(CPUState *cs, CPUState *target,
* hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
* should not be set yet in the respective vCPU register.
* Thus, in case an exception is pending, it is
- * important to save the exception payload seperately.
+ * important to save the exception payload separately.
*
* Therefore, if an exception is not in a pending state
* or vCPU is not in guest-mode, it is not important to
* distinguish between a pending and injected exception
- * and we don't need to store seperately the exception payload.
+ * and we don't need to store separately the exception payload.
*
* In order to preserve better backwards-compatible migration,
* convert a pending exception to an injected exception in
}
/* perform a conditional store into register 'reg' according to jump opcode
- value 'b'. In the fast case, T0 is guaranted not to be used. */
+ value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
int inv, jcc_op, cond;
}
/* generate a conditional jump to label 'l1' according to jump opcode
- value 'b'. In the fast case, T0 is guaranted not to be used. */
+ value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
CCPrepare cc = gen_prepare_cc(s, b, s->T0);
}
/* Generate a conditional jump to label 'l1' according to jump opcode
- value 'b'. In the fast case, T0 is guaranted not to be used.
+ value 'b'. In the fast case, T0 is guaranteed not to be used.
A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
if (s->prefix & PREFIX_LOCK) {
switch (op) {
case 0: /* bt */
- /* Needs no atomic ops; we surpressed the normal
+ /* Needs no atomic ops; we suppressed the normal
memory load for LOCK above so do it now. */
gen_op_ld_v(s, ot, s->T0, s->A0);
break;
add $8,%esp
/*
- * Don't worry about stack frame, assume everthing
+ * Don't worry about stack frame, assume everything
* is garbage when we return, we won't need it.
*/
call main
#
# 4. The instruction encoding. For example, "C1 /4 ib".
#
-# 5. The validity of the instruction in 32-bit (aka compatiblity, legacy) mode.
+# 5. The validity of the instruction in 32-bit (aka compatibility, legacy) mode.
#
# 6. The validity of the instruction in 64-bit mode.
#