/*
* SPDX-License-Identifier: GPL-2.0-or-later
- * Host specific cpu indentification for AArch64.
+ * Host specific cpu identification for AArch64.
*/
#ifndef HOST_CPUINFO_H
{
AspeedSoCState *soc = &bmc->soc;
- /* U10 24C08 connects to SDA/SCL Groupt 1 by default */
+ /* U10 24C08 connects to SDA/SCL Group 1 by default */
uint8_t *eeprom_buf = g_malloc0(32 * 1024);
smbus_eeprom_init_one(aspeed_i2c_get_bus(&soc->i2c, 0), 0x50, eeprom_buf);
{
/*
* The MPS2 TZ FPGA images have IDAUs in them which are connected to
- * the Master Security Controllers. Thes have the same logic as
+ * the Master Security Controllers. These have the same logic as
* is used by the IoTKit for the IDAU connected to the CPU, except
* that MSCs don't care about the NSC attribute.
*/
}
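For readers unfamiliar with the IDAU callback being described, the decision it makes is essentially "which 256MB region is this, and is that region a Secure alias". A minimal standalone sketch, assuming the IoTKit convention that odd-numbered 256MB regions (address bit 28 set) are the Secure aliases; the function and parameter names are illustrative, not QEMU's, and the NSC handling that the MSCs ignore is left out:

#include <stdbool.h>
#include <stdint.h>

/*
 * Simplified IDAU check in the IoTKit style: the 256MB region number
 * is address bits [31:28]; even regions are Non-secure, odd regions
 * are their Secure aliases.  A Master Security Controller only needs
 * the S/NS answer, so the NSC attribute is not computed here.
 */
static void idau_check_sketch(uint32_t address, int *region, bool *ns)
{
    *region = (address >> 28) & 0xf;
    *ns = !(*region & 1);
}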
/* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the
- * corrsponding bit in EISR is set.
+ * corresponding bit in EISR is set.
*/
static inline bool gic_lr_entry_is_eoi(uint32_t entry)
{
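The EISR condition being documented follows from the architectural GICv2 GICH_LR layout (state in bits [29:28], HW in bit [31], and the EOI-request bit at bit [19] of a software entry). A self-contained sketch of that check, with the field macros written out here rather than taken from the QEMU headers:

#include <stdbool.h>
#include <stdint.h>

/* GICv2 GICH_LR<n> fields, per the architecture spec */
#define LR_STATE(entry)  (((entry) >> 28) & 3)   /* 0 means invalid/empty */
#define LR_HW(entry)     (((entry) >> 31) & 1)   /* hardware interrupt    */
#define LR_EOI(entry)    (((entry) >> 19) & 1)   /* request maintenance   */

/* EISR<n>: the entry is empty, is a software entry, and asked for an EOI */
static bool lr_entry_is_eoi_sketch(uint32_t entry)
{
    return LR_STATE(entry) == 0 && !LR_HW(entry) && LR_EOI(entry);
}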
/* ??? This currently clears the pending bit for all CPUs, even
for per-CPU interrupts. It's unclear whether this is the
- corect behavior. */
+ correct behavior. */
if (value & (1 << i)) {
GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
}
/* Only the ProcessorSleep bit is writable. When the guest sets
* it, it requests that we transition the channel between the
* redistributor and the cpu interface to quiescent, and that
- * we set the ChildrenAsleep bit once the inteface has reached the
+ * we set the ChildrenAsleep bit once the interface has reached the
* quiescent state.
* Setting the ProcessorSleep to 0 reverses the quiescing, and
* ChildrenAsleep is cleared once the transition is complete.
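As a rough model of the behaviour described above; the bit positions are the architectural GICR_WAKER ones (ProcessorSleep is bit 1, ChildrenAsleep bit 2), the quiescing itself is reduced to a hypothetical callback, and the transition completes immediately in this sketch:

#include <stdbool.h>
#include <stdint.h>

#define WAKER_PROCESSOR_SLEEP  (1u << 1)   /* guest-writable             */
#define WAKER_CHILDREN_ASLEEP  (1u << 2)   /* read-only, set by the GIC  */

/* Only ProcessorSleep is writable; ChildrenAsleep tracks the quiescent
 * state of the redistributor-to-CPU-interface channel. */
static void waker_write_sketch(uint32_t *waker, uint32_t value,
                               void (*set_quiescent)(bool quiesce))
{
    bool sleep = value & WAKER_PROCESSOR_SLEEP;

    *waker &= ~WAKER_PROCESSOR_SLEEP;
    if (sleep) {
        *waker |= WAKER_PROCESSOR_SLEEP;
    }

    set_quiescent(sleep);
    if (sleep) {
        *waker |= WAKER_CHILDREN_ASLEEP;   /* interface reached quiescence */
    } else {
        *waker &= ~WAKER_CHILDREN_ASLEEP;  /* transition back is complete  */
    }
}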
vec->active = 0;
if (vec->level) {
/* Re-pend the exception if it's still held high; only
- * happens for extenal IRQs
+ * happens for external IRQs
*/
assert(irq >= NVIC_FIRST_IRQ);
vec->pending = 1;
/*
* mctl_r40_detect_rank_count in u-boot will write the high 1G of DDR
- * to detect wether the board support dual_rank or not. Create a virtual memory
+ * to detect whether the board supports dual_rank or not. Create a virtual memory
* if the board's ram_size less or equal than 1G, and set read time out flag of
* REG_DRAMCTL_PGSR when the user touch this high dram.
*/
/*
- * Exynos4210 Pseudo Random Nubmer Generator Emulation
+ * Exynos4210 Pseudo Random Number Generator Emulation
*
* Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
*
* Some versions of the reference manual claim that UART2 is @
* 0x30870000, but experiments with HW + DT files in upstream
* Linux kernel show that not to be true and that block is
- * acutally located @ 0x30890000
+ * actually located @ 0x30890000
*/
FSL_IMX7_UART2_ADDR = 0x30890000,
FSL_IMX7_UART3_ADDR = 0x30880000,
*/
bool vectpending_is_s_banked;
int exception_prio; /* group prio of the highest prio active exception */
- int vectpending_prio; /* group prio of the exeception in vectpending */
+ int vectpending_prio; /* group prio of the exception in vectpending */
MemoryRegion sysregmem;
}
/*
- * The PSTATE bits only mask the interrupt if we have not overriden the
+ * The PSTATE bits only mask the interrupt if we have not overridden the
* ability above.
*/
return unmasked || pstate_unmasked;
return aa64;
}
-/* Function for determing whether guest cp register reads and writes should
+/* Function for determining whether guest cp register reads and writes should
* access the secure or non-secure bank of a cp register. When EL3 is
* operating in AArch32 state, the NS-bit determines whether the secure
* instance of a cp register should be used. When EL3 is AArch64 (or if
if (kvm_enabled()) {
/*
- * For KVM we have to automatically enable all supported unitialized
+ * For KVM we have to automatically enable all supported uninitialized
* lengths, even when the smaller lengths are not all powers-of-two.
*/
vq_map |= vq_supported & ~vq_init & vq_mask;
* pmevtyper_rawwrite is called between a pair of pmu_op_start and
* pmu_op_finish calls when loading saved state for a migration. Because
* we're potentially updating the type of event here, the value written to
- * c14_pmevcntr_delta by the preceeding pmu_op_start call may be for a
+ * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
* different counter type. Therefore, we need to set this value to the
* current count for the counter type we're writing so that pmu_op_finish
* has the correct count for its calculation.
/*
* QEMU does not have a way to invalidate by physical address, thus
* invalidating a range of physical addresses is accomplished by
- * flushing all tlb entries in the outer sharable domain,
+ * flushing all tlb entries in the outer shareable domain,
* just like PAALLOS.
*/
{ .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
* R: 0 because unpriv and A flag not set
* SRVALID: 0 because NS
* MRVALID: 0 because unpriv and A flag not set
- * SREGION: 0 becaus SRVALID is 0
+ * SREGION: 0 because SRVALID is 0
* MREGION: 0 because MRVALID is 0
*/
return 0;
* + for EL2 and EL3 there is only one TBI bit, and if it is set
* then the address is zero-extended, clearing bits [63:56]
* + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
- * and TBI1 controls addressses with bit 55 == 1.
+ * and TBI1 controls addresses with bit 55 == 1.
* If the appropriate TBI bit is set for the address then
* the address is sign-extended from bit 55 into bits [63:56]
*
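A standalone sketch of the extension rule spelled out above, with the TBI bits passed in as plain booleans (the helper name and parameters are illustrative, not QEMU's):

#include <stdbool.h>
#include <stdint.h>

/* Apply the tagged-address rules to a 64-bit virtual address. */
static uint64_t apply_tbi_sketch(uint64_t addr, bool el2_or_el3,
                                 bool tbi, bool tbi0, bool tbi1)
{
    if (el2_or_el3) {
        /* single TBI bit: zero-extend by clearing bits [63:56] */
        return tbi ? (addr & 0x00ffffffffffffffULL) : addr;
    }

    /* EL0/EL1: bit 55 selects TBI0 or TBI1 ... */
    bool bit55 = (addr >> 55) & 1;
    if (!(bit55 ? tbi1 : tbi0)) {
        return addr;
    }
    /* ... and the address is sign-extended from bit 55 into [63:56] */
    return bit55 ? (addr | 0xff00000000000000ULL)
                 : (addr & 0x00ffffffffffffffULL);
}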
if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
/*
- * A write to any coprocessor regiser that ends a TB
+ * A write to any coprocessor register that ends a TB
* must rebuild the hflags for the next TB.
*/
gen_rebuild_hflags(s);
* execution if it is not in an IT block. For us this means
* only that if PSR.ECI says we should not be executing the beat
* corresponding to the lane of the vector register being accessed
- * then we should skip perfoming the move, and that we need to do
+ * then we should skip performing the move, and that we need to do
* the usual check for bad ECI state and advance of ECI state.
* (If PSR.ECI is non-zero then we cannot be in an IT block.)
*/
* execution if it is not in an IT block. For us this means
* only that if PSR.ECI says we should not be executing the beat
* corresponding to the lane of the vector register being accessed
- * then we should skip perfoming the move, and that we need to do
+ * then we should skip performing the move, and that we need to do
* the usual check for bad ECI state and advance of ECI state.
* (If PSR.ECI is non-zero then we cannot be in an IT block.)
*/
/* Perform an inline saturating addition of a 32-bit value within
* a 64-bit register. The second operand is known to be positive,
- * which halves the comparisions we must perform to bound the result.
+ * which halves the comparisons we must perform to bound the result.
*/
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
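A plain-C picture of the arithmetic (not the TCG ops the function actually emits), showing why a known-positive second operand leaves only one bound to check in each direction:

#include <stdbool.h>
#include <stdint.h>

/* Saturating add/subtract of a known-positive 'val' to a 32-bit value
 * held in a 64-bit variable: u selects unsigned saturation, d selects
 * subtraction.  A positive val can only push an addition past the
 * upper bound and a subtraction past the lower one. */
static int64_t sat_addsub_32_sketch(int64_t reg, int64_t val, bool u, bool d)
{
    if (u) {
        if (d) {
            return reg < val ? 0 : reg - val;
        }
        return reg + val > UINT32_MAX ? UINT32_MAX : reg + val;
    }
    if (d) {
        return reg - val < INT32_MIN ? INT32_MIN : reg - val;
    }
    return reg + val > INT32_MAX ? INT32_MAX : reg + val;
}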
* Generate code for M-profile FP context handling: update the
* ownership of the FP context, and create a new context if
* necessary. This corresponds to the parts of the pseudocode
- * ExecuteFPCheck() after the inital PreserveFPState() call.
+ * ExecuteFPCheck() after the initial PreserveFPState() call.
*/
static void gen_update_fp_context(DisasContext *s)
{
* Process the entire segment at once, writing back the
* results only after we've consumed all of the inputs.
*
- * Key to indicies by column:
+ * Key to indices by column:
* i j i k j k
*/
sum00 = a[s + H4(0 + 0)];
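The same read-everything-then-write pattern in miniature, for a single 2x2 segment with simplified element types. The real helper works on vector segments; this only shows why the sums are buffered (the destination may overlap the inputs) and how result (i, j) is the dot product of row i of n with row j of m, matching the i j / i k / j k key:

#include <stdint.h>

/* d = a + n * m^T for one 2x2 segment; d may alias a, n or m, so every
 * input element is read before any output element is written. */
static void mmla_2x2_sketch(int32_t *d, const int32_t *a,
                            const int32_t *n, const int32_t *m)
{
    int32_t sum00 = a[0] + n[0] * m[0] + n[1] * m[1];
    int32_t sum01 = a[1] + n[0] * m[2] + n[1] * m[3];
    int32_t sum10 = a[2] + n[2] * m[0] + n[3] * m[1];
    int32_t sum11 = a[3] + n[2] * m[2] + n[3] * m[3];

    d[0] = sum00;
    d[1] = sum01;
    d[2] = sum10;
    d[3] = sum11;
}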
from __future__ import print_function
#
-# Test the SVE registers are visable and changeable via gdbstub
+# Test that the SVE registers are visible and changeable via gdbstub
#
# This is launched via tests/guest-debug/run-test.py
#
" fmopa za1.s, p0/m, p0/m, z0.s, z0.s\n"
/*
* Read the first 4x4 sub-matrix of elements from tile 1:
- * Note that za1h should be interchangable here.
+ * Note that za1h should be interchangeable here.
*/
" mov w12, #0\n"
" mova z0.s, p0/m, za1v.s[w12, #0]\n"
/*
* Semihosting interface on ARM AArch64
- * See "Semihosting for AArch32 and AArch64 Relase 2.0" by ARM
+ * See "Semihosting for AArch32 and AArch64 Release 2.0" by ARM
* w0 - semihosting call number
* x1 - semihosting parameter
*/
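A minimal caller matching that convention, assuming GCC/Clang inline assembly and the A64 semihosting trap (HLT #0xF000); SYS_WRITE0 (0x04) is the call number from the ARM semihosting specification, while the wrapper names are just for illustration:

#include <stdint.h>

/* Issue a semihosting call: number in w0, parameter in x1, result in x0. */
static uint64_t semihosting_call(uint32_t nr, uint64_t param)
{
    register uint64_t x0 __asm__("x0") = nr;
    register uint64_t x1 __asm__("x1") = param;

    __asm__ volatile("hlt #0xf000" : "+r"(x0) : "r"(x1) : "memory");
    return x0;
}

/* For example, print a NUL-terminated string with SYS_WRITE0 (0x04). */
static void semi_write0(const char *s)
{
    semihosting_call(0x04, (uint64_t)(uintptr_t)s);
}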
* T0SZ[5:0] = 2^(64 - 25)
*
* The size of T0SZ controls what the initial lookup level. It
- * would be nice to start at level 2 but unfortunatly for a
+ * would be nice to start at level 2 but unfortunately for a
* flat-mapping on the virt machine we need to handle IA's
* with at least 1gb range to see RAM. So we start with a
* level 1 lookup.
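The arithmetic behind that choice, as a tiny host-side sketch: with a 4KB granule the page offset covers 12 bits and each table level resolves 9 more, so the walk starts at the shallowest level that still covers the top input-address bit.

#include <stdio.h>

/* Initial lookup level for a 4KB granule, given T0SZ. */
static int start_level(int t0sz)
{
    int ia_bits = 64 - t0sz;
    int level = 3;
    int covered = 12 + 9;          /* page offset + level-3 index */

    while (covered < ia_bits) {
        level--;
        covered += 9;
    }
    return level;
}

int main(void)
{
    /* T0SZ = 25 gives a 39-bit IA, so the walk must start at level 1;
     * only a 30-bit (1GB) IA or smaller could start at level 2, which
     * is not enough to reach RAM at 1GB on the virt board. */
    printf("T0SZ=25: %d-bit IA, start level %d\n", 64 - 25, start_level(25));
    printf("T0SZ=34: %d-bit IA, start level %d\n", 64 - 34, start_level(34));
    return 0;
}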
msr cpacr_el1, x0
/* Setup some stack space and enter the test code.
- * Assume everthing except the return value is garbage when we
+ * Assume everything except the return value is garbage when we
* return, we won't need it.
*/
adrp x0, stack_end
}
ptr_to_heap++;
}
- ml_printf("r/w to heap upto %p\n", ptr_to_heap);
+ ml_printf("r/w to heap up to %p\n", ptr_to_heap);
ml_printf("Passed HeapInfo checks\n");
return 0;