Mike is like a trained ape, only without the training.
commit af3862aae6 (parent 3b9ab11f8a)
@@ -75,7 +75,7 @@ &emac {
         pinctrl-0 = <&emac_rgmii_pins>;
         phy-supply = <&reg_gmac_3v3>;
         phy-handle = <&ext_rgmii_phy>;
-        phy-mode = "rgmii";
+        phy-mode = "rgmii-id";
         status = "okay";
 };
@@ -91,10 +91,12 @@ regulators {
         reg_vdd_soc: BUCK1 {
                 regulator-name = "buck1";
                 regulator-min-microvolt = <800000>;
-                regulator-max-microvolt = <900000>;
+                regulator-max-microvolt = <850000>;
                 regulator-boot-on;
                 regulator-always-on;
                 regulator-ramp-delay = <3125>;
+                nxp,dvs-run-voltage = <850000>;
+                nxp,dvs-standby-voltage = <800000>;
         };

         reg_vdd_arm: BUCK2 {
@@ -111,7 +113,7 @@ reg_vdd_arm: BUCK2 {
         reg_vdd_dram: BUCK3 {
                 regulator-name = "buck3";
                 regulator-min-microvolt = <850000>;
-                regulator-max-microvolt = <900000>;
+                regulator-max-microvolt = <950000>;
                 regulator-boot-on;
                 regulator-always-on;
         };
@@ -150,7 +152,7 @@ reg_nvcc_snvs: LDO1 {

         reg_vdd_snvs: LDO2 {
                 regulator-name = "ldo2";
-                regulator-min-microvolt = <850000>;
+                regulator-min-microvolt = <800000>;
                 regulator-max-microvolt = <900000>;
                 regulator-boot-on;
                 regulator-always-on;
@@ -163,6 +163,12 @@ config PAGE_OFFSET
         default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
         default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB

+config KASAN_SHADOW_OFFSET
+        hex
+        depends on KASAN_GENERIC
+        default 0xdfffffc800000000 if 64BIT
+        default 0xffffffff if 32BIT
+
 config ARCH_FLATMEM_ENABLE
         def_bool !NUMA

@@ -30,8 +30,7 @@
 #define KASAN_SHADOW_SIZE   (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_SHADOW_START  KERN_VIRT_START
 #define KASAN_SHADOW_END    (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
-                                (64 - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

 void kasan_init(void);
 asmlinkage void kasan_early_init(void);
@@ -193,6 +193,7 @@ setup_trap_vector:
         csrw CSR_SCRATCH, zero
         ret

+.align 2
 .Lsecondary_park:
         /* We lack SMP support or have too many harts, so park this hart */
         wfi
@@ -17,6 +17,9 @@ asmlinkage void __init kasan_early_init(void)
         uintptr_t i;
         pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

+        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+                KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
         for (i = 0; i < PTRS_PER_PTE; ++i)
                 set_pte(kasan_early_shadow_pte + i,
                         mk_pte(virt_to_page(kasan_early_shadow_page),
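Taken together, the three RISC-V KASAN hunks above replace the derived KASAN_SHADOW_OFFSET macro with a fixed Kconfig constant and assert the old equivalence at build time with BUILD_BUG_ON. As a hedged illustration only (not code from this commit), generic KASAN maps an address to its shadow byte as shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET; the sketch below hard-codes the 64BIT offset from the Kconfig hunk and assumes the usual scale shift of 3:

#include <stdint.h>
#include <stdio.h>

/* Offset value copied from the Kconfig hunk above; the scale shift of 3
 * (one shadow byte per 8-byte granule) is the generic KASAN default and
 * is an assumption here. */
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET      0xdfffffc800000000ULL

static uint64_t kasan_mem_to_shadow(uint64_t addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
        /* Example kernel virtual address taken from the PAGE_OFFSET defaults. */
        uint64_t addr = 0xffffffe000000000ULL;

        printf("shadow(%#llx) = %#llx\n",
               (unsigned long long)addr,
               (unsigned long long)kasan_mem_to_shadow(addr));
        return 0;
}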
@@ -172,21 +175,10 @@ void __init kasan_init(void)
         phys_addr_t p_start, p_end;
         u64 i;

-        /*
-         * Populate all kernel virtual address space with kasan_early_shadow_page
-         * except for the linear mapping and the modules/kernel/BPF mapping.
-         */
-        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-                                    (void *)kasan_mem_to_shadow((void *)
-                                                                VMEMMAP_END));
         if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
                 kasan_shallow_populate(
                         (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                         (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
-        else
-                kasan_populate_early_shadow(
-                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

         /* Populate the linear mapping */
         for_each_mem_range(i, &p_start, &p_end) {
@@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)

         if (i == NR_JIT_ITERATIONS) {
                 pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
-                bpf_jit_binary_free(jit_data->header);
+                if (jit_data->header)
+                        bpf_jit_binary_free(jit_data->header);
                 prog = orig_prog;
                 goto out_offset;
         }
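The guard added above suggests jit_data->header can still be unset when the JIT image fails to converge, in which case freeing it unconditionally would be a bug. A minimal sketch of the check-before-free pattern in plain C (the struct and helper names here are made up for illustration, not taken from the kernel):

#include <stdlib.h>

struct jit_data {
        void *header;   /* stand-in for the JIT image; may still be NULL */
};

/* Discard the image only if it was ever allocated. */
static void jit_discard_image(struct jit_data *jd)
{
        if (jd->header) {
                free(jd->header);
                jd->header = NULL;
        }
}

int main(void)
{
        struct jit_data jd = { 0 };

        jit_discard_image(&jd);   /* safe: nothing allocated yet */
        jd.header = malloc(64);
        jit_discard_image(&jd);   /* frees the allocated image */
        return 0;
}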
@@ -166,6 +167,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
         return prog;
 }

+u64 bpf_jit_alloc_exec_limit(void)
+{
+        return BPF_JIT_REGION_SIZE;
+}
+
 void *bpf_jit_alloc_exec(unsigned long size)
 {
         return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
@@ -78,7 +78,7 @@
         vpxor tmp0, x, x;


-.section        .rodata.cst164, "aM", @progbits, 164
+.section        .rodata.cst16, "aM", @progbits, 16
 .align 16

 /*
@@ -133,6 +133,10 @@
 .L0f0f0f0f:
         .long 0x0f0f0f0f

+/* 12 bytes, only for padding */
+.Lpadding_deadbeef:
+        .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
+

 .text
         .align 16
@@ -93,7 +93,7 @@
         vpxor tmp0, x, x;


-.section        .rodata.cst164, "aM", @progbits, 164
+.section        .rodata.cst16, "aM", @progbits, 16
 .align 16

 /*
@@ -148,6 +148,10 @@
 .L0f0f0f0f:
         .long 0x0f0f0f0f

+/* 12 bytes, only for padding */
+.Lpadding_deadbeef:
+        .long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef
+

 .text
         .align 16
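Both SM4 hunks make the same two changes: the constant pool moves into the standard .rodata.cst16 mergeable section, whose entries must be exactly 16 bytes, and gains 12 bytes of 0xdeadbeef padding. Assuming the "164" in the old section name reflects the pool size in bytes, the padding is what rounds it up to a 16-byte multiple; a trivial compile-time check of that arithmetic (C11):

#include <assert.h>

int main(void)
{
        /* 164 bytes cannot be an entry in a 16-byte mergeable section. */
        static_assert(164 % 16 != 0, "164 is not a multiple of 16");

        /* With three 4-byte 0xdeadbeef padding words the pool is. */
        static_assert((164 + 12) % 16 == 0, "padded pool is a 16-byte multiple");
        return 0;
}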
@@ -702,7 +702,8 @@ struct kvm_vcpu_arch {

         struct kvm_pio_request pio;
         void *pio_data;
-        void *guest_ins_data;
+        void *sev_pio_data;
+        unsigned sev_pio_count;

         u8 event_exit_inst_len;

@@ -1097,7 +1098,7 @@ struct kvm_arch {
         u64 cur_tsc_generation;
         int nr_vcpus_matched_tsc;

-        spinlock_t pvclock_gtod_sync_lock;
+        raw_spinlock_t pvclock_gtod_sync_lock;
         bool use_master_clock;
         u64 master_kernel_ns;
         u64 master_cycle_now;
@@ -2321,13 +2321,14 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
         struct kvm_lapic *apic = vcpu->arch.apic;
+        u64 msr_val;
         int i;

         if (!init_event) {
-                vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
-                                       MSR_IA32_APICBASE_ENABLE;
+                msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
                 if (kvm_vcpu_is_reset_bsp(vcpu))
-                        vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+                        msr_val |= MSR_IA32_APICBASE_BSP;
+                kvm_lapic_set_base(vcpu, msr_val);
         }

         if (!apic)
@@ -2336,11 +2337,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
         /* Stop the timer in case it's a reset to an active apic */
         hrtimer_cancel(&apic->lapic_timer.timer);

-        if (!init_event) {
-                apic->base_address = APIC_DEFAULT_PHYS_BASE;
-
+        /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
+        if (!init_event)
                 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
-        }
         kvm_apic_set_version(apic->vcpu);

         for (i = 0; i < KVM_APIC_LVT_NUM; i++)
@@ -2481,6 +2480,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
                 lapic_timer_advance_dynamic = false;
         }

+        /*
+         * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
+         * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
+         */
+        vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
         static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
         kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
@@ -2942,5 +2946,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 void kvm_lapic_exit(void)
 {
         static_key_deferred_flush(&apic_hw_disabled);
+        WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
         static_key_deferred_flush(&apic_sw_disabled);
+        WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
 }
@@ -4596,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
         unsigned bit;
         bool wp;

-        if (!is_cr4_pke(mmu)) {
-                mmu->pkru_mask = 0;
+        mmu->pkru_mask = 0;
+
+        if (!is_cr4_pke(mmu))
                 return;
-        }

         wp = is_cr0_wp(mmu);

@@ -618,7 +618,12 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
         vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
         vmsa.address = __sme_pa(svm->vmsa);
         vmsa.len = PAGE_SIZE;
-        return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+        if (ret)
+                return ret;
+
+        vcpu->arch.guest_state_protected = true;
+        return 0;
 }

 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
@@ -1479,6 +1484,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
                 goto e_free_trans;
         }

+        /*
+         * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
+         * encrypts the written data with the guest's key, and the cache may
+         * contain dirty, unencrypted data.
+         */
+        sev_clflush_pages(guest_page, n);
+
         /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
         data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
         data.guest_address |= sev_me_mask;
@@ -2579,11 +2591,20 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)

 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 {
-        if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
+        int count;
+        int bytes;
+
+        if (svm->vmcb->control.exit_info_2 > INT_MAX)
                 return -EINVAL;

-        return kvm_sev_es_string_io(&svm->vcpu, size, port,
-                                    svm->ghcb_sa, svm->ghcb_sa_len, in);
+        count = svm->vmcb->control.exit_info_2;
+        if (unlikely(check_mul_overflow(count, size, &bytes)))
+                return -EINVAL;
+
+        if (!setup_vmgexit_scratch(svm, in, bytes))
+                return -EINVAL;
+
+        return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
 }

 void sev_es_init_vmcb(struct vcpu_svm *svm)
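The sev_es_string_io() hunk caps exit_info_2 at INT_MAX and then multiplies the element count by the element size with check_mul_overflow() before sizing the scratch buffer. A hedged userspace sketch of the same overflow-checked sizing, using the GCC/Clang builtin that the kernel helper boils down to on modern compilers:

#include <limits.h>
#include <stdio.h>

/* Returns 0 and stores count * size in *bytes, or -1 if the request is
 * unrepresentable (mirrors the -EINVAL paths in the hunk above). */
static int string_io_bytes(unsigned long long count, int size, int *bytes)
{
        int n;

        if (count > INT_MAX)
                return -1;

        /* true return means the multiplication overflowed */
        if (__builtin_mul_overflow((int)count, size, &n))
                return -1;

        *bytes = n;
        return 0;
}

int main(void)
{
        int bytes = 0;

        printf("%d\n", string_io_bytes(512, 4, &bytes));           /* ok: 2048 bytes */
        printf("%d\n", string_io_bytes(0x7fffffffULL, 4, &bytes));  /* rejected: overflow */
        return 0;
}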
@@ -5562,9 +5562,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)

 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
 {
-        vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
-        vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
-        return 0;
+        /*
+         * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
+         * VM-Exits. Unconditionally set the flag here and leave the handling to
+         * vmx_handle_exit().
+         */
+        to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+        return 1;
 }

 /*
@@ -6051,9 +6055,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
         int ret = __vmx_handle_exit(vcpu, exit_fastpath);

         /*
-         * Even when current exit reason is handled by KVM internally, we
-         * still need to exit to user space when bus lock detected to inform
-         * that there is a bus lock in guest.
+         * Exit to user space when bus lock detected to inform that there is
+         * a bus lock in guest.
          */
         if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
                 if (ret > 0)
@@ -6302,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)

                 /*
                  * If we are running L2 and L1 has a new pending interrupt
-                 * which can be injected, we should re-evaluate
-                 * what should be done with this new L1 interrupt.
-                 * If L1 intercepts external-interrupts, we should
-                 * exit from L2 to L1. Otherwise, interrupt should be
-                 * delivered directly to L2.
+                 * which can be injected, this may cause a vmexit or it may
+                 * be injected into L2.  Either way, this interrupt will be
+                 * processed via KVM_REQ_EVENT, not RVI, because we do not use
+                 * virtual interrupt delivery to inject L1 interrupts into L2.
                  */
-                if (is_guest_mode(vcpu) && max_irr_updated) {
-                        if (nested_exit_on_intr(vcpu))
-                                kvm_vcpu_exiting_guest_mode(vcpu);
-                        else
-                                kvm_make_request(KVM_REQ_EVENT, vcpu);
-                }
+                if (is_guest_mode(vcpu) && max_irr_updated)
+                        kvm_make_request(KVM_REQ_EVENT, vcpu);
         } else {
                 max_irr = kvm_lapic_find_highest_irr(vcpu);
         }
@@ -2542,7 +2542,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
         kvm_vcpu_write_tsc_offset(vcpu, offset);
         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

-        spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
         if (!matched) {
                 kvm->arch.nr_vcpus_matched_tsc = 0;
         } else if (!already_matched) {
@@ -2550,7 +2550,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
         }

         kvm_track_tsc_matching(vcpu);
-        spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }

 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2780,9 +2780,9 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
         kvm_make_mclock_inprogress_request(kvm);

         /* no guest entries from this point */
-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         pvclock_update_vm_gtod_copy(kvm);
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

         kvm_for_each_vcpu(i, vcpu, kvm)
                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2800,15 +2800,15 @@ u64 get_kvmclock_ns(struct kvm *kvm)
         unsigned long flags;
         u64 ret;

-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         if (!ka->use_master_clock) {
-                spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+                raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
                 return get_kvmclock_base_ns() + ka->kvmclock_offset;
         }

         hv_clock.tsc_timestamp = ka->master_cycle_now;
         hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

         /* both __this_cpu_read() and rdtsc() should be on the same cpu */
         get_cpu();
@@ -2902,13 +2902,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
          * If the host uses TSC clock, then passthrough TSC as stable
          * to the guest.
          */
-        spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
         use_master_clock = ka->use_master_clock;
         if (use_master_clock) {
                 host_tsc = ka->master_cycle_now;
                 kernel_ns = ka->master_kernel_ns;
         }
-        spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+        raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

         /* Keep irq disabled to prevent changes to the clock */
         local_irq_save(flags);
@@ -6100,13 +6100,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
                  * is slightly ahead) here we risk going negative on unsigned
                  * 'system_time' when 'user_ns.clock' is very small.
                  */
-                spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+                raw_spin_lock_irq(&ka->pvclock_gtod_sync_lock);
                 if (kvm->arch.use_master_clock)
                         now_ns = ka->master_kernel_ns;
                 else
                         now_ns = get_kvmclock_base_ns();
                 ka->kvmclock_offset = user_ns.clock - now_ns;
-                spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+                raw_spin_unlock_irq(&ka->pvclock_gtod_sync_lock);

                 kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
                 break;
@@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 }

 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
-                               unsigned short port, void *val,
+                               unsigned short port,
                                unsigned int count, bool in)
 {
         vcpu->arch.pio.port = port;
@@ -6914,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
         vcpu->arch.pio.count = count;
         vcpu->arch.pio.size = size;

-        if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-                vcpu->arch.pio.count = 0;
+        if (!kernel_pio(vcpu, vcpu->arch.pio_data))
                 return 1;
-        }

         vcpu->run->exit_reason = KVM_EXIT_IO;
         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -6929,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
         return 0;
 }

-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
-                           unsigned short port, void *val, unsigned int count)
-{
-        int ret;
-
-        if (vcpu->arch.pio.count)
-                goto data_avail;
-
-        memset(vcpu->arch.pio_data, 0, size * count);
-
-        ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
-        if (ret) {
-data_avail:
-                memcpy(val, vcpu->arch.pio_data, size * count);
-                trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
-                vcpu->arch.pio.count = 0;
-                return 1;
-        }
-
-        return 0;
+static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                             unsigned short port, unsigned int count)
+{
+        WARN_ON(vcpu->arch.pio.count);
+        memset(vcpu->arch.pio_data, 0, size * count);
+        return emulator_pio_in_out(vcpu, size, port, count, true);
+}
+
+static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
+{
+        int size = vcpu->arch.pio.size;
+        unsigned count = vcpu->arch.pio.count;
+        memcpy(val, vcpu->arch.pio_data, size * count);
+        trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+        vcpu->arch.pio.count = 0;
+}
+
+static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+                           unsigned short port, void *val, unsigned int count)
+{
+        if (vcpu->arch.pio.count) {
+                /* Complete previous iteration. */
+        } else {
+                int r = __emulator_pio_in(vcpu, size, port, count);
+                if (!r)
+                        return r;
+
+                /* Results already available, fall through. */
+        }
+
+        WARN_ON(count != vcpu->arch.pio.count);
+        complete_emulator_pio_in(vcpu, val);
+        return 1;
 }

 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
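The refactor above splits emulator_pio_in() into a helper that starts the I/O (__emulator_pio_in) and one that copies the result out and clears the in-flight count (complete_emulator_pio_in), so a request can either finish immediately in the kernel or be completed later after a userspace exit. A hedged, self-contained sketch of that start/complete split in generic C (not the KVM types):

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the in-flight PIO state kept on the vCPU. */
struct pio_state {
        char buf[16];
        unsigned int count;   /* non-zero while a request is in flight */
        unsigned int size;
};

/* "Start" phase: returns 1 if the data is already available (handled in
 * kernel), 0 if the caller must wait for an external completion. */
static int pio_in_start(struct pio_state *s, unsigned int size,
                        unsigned int count, int kernel_handled)
{
        s->size = size;
        s->count = count;
        if (!kernel_handled)
                return 0;
        memset(s->buf, 0xab, size * count);  /* pretend the device filled it */
        return 1;
}

/* "Complete" phase: copy the result out and clear the in-flight state. */
static void pio_in_complete(struct pio_state *s, void *val)
{
        memcpy(val, s->buf, s->size * s->count);
        s->count = 0;
}

int main(void)
{
        struct pio_state s = { {0}, 0, 0 };
        char out[4];

        if (pio_in_start(&s, 1, sizeof(out), 1))
                pio_in_complete(&s, out);    /* immediate completion path */
        printf("in-flight count after completion: %u\n", s.count);
        return 0;
}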
@@ -6963,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
                             unsigned short port, const void *val,
                             unsigned int count)
 {
+        int ret;
+
         memcpy(vcpu->arch.pio_data, val, size * count);
         trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
-        return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+        ret = emulator_pio_in_out(vcpu, size, port, count, false);
+        if (ret)
+                vcpu->arch.pio.count = 0;
+
+        return ret;
 }

 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
@@ -8139,9 +8156,9 @@ static void kvm_hyperv_tsc_notifier(void)
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 struct kvm_arch *ka = &kvm->arch;

-                spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+                raw_spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
                 pvclock_update_vm_gtod_copy(kvm);
-                spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+                raw_spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);

                 kvm_for_each_vcpu(cpu, vcpu, kvm)
                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -8783,9 +8800,17 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)

         kvm_run->cr8 = kvm_get_cr8(vcpu);
         kvm_run->apic_base = kvm_get_apic_base(vcpu);
+
+        /*
+         * The call to kvm_ready_for_interrupt_injection() may end up in
+         * kvm_xen_has_interrupt() which may require the srcu lock to be
+         * held, to protect against changes in the vcpu_info address.
+         */
+        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
         kvm_run->ready_for_interrupt_injection =
                 pic_in_kernel(vcpu->kvm) ||
                 kvm_vcpu_ready_for_interrupt_injection(vcpu);
+        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

         if (is_smm(vcpu))
                 kvm_run->flags |= KVM_RUN_X86_SMM;
@@ -9643,14 +9668,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                         break;

-                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+                if (vcpu->arch.apicv_active)
+                        static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+
+                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                         exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
                         break;
                 }
-
-                if (vcpu->arch.apicv_active)
-                        static_call(kvm_x86_sync_pir_to_irr)(vcpu);
         }
 }

 /*
  * Do this here before restoring debug registers on the host. And
|
||||
|
||||
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
|
||||
mutex_init(&kvm->arch.apic_map_lock);
|
||||
spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
|
||||
raw_spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
|
||||
|
||||
kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
|
||||
pvclock_update_vm_gtod_copy(kvm);
|
||||
@ -11392,7 +11417,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
|
||||
int level = i + 1;
|
||||
int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
|
||||
|
||||
WARN_ON(slot->arch.rmap[i]);
|
||||
if (slot->arch.rmap[i])
|
||||
continue;
|
||||
|
||||
slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
|
||||
if (!slot->arch.rmap[i]) {
|
||||
@@ -12367,44 +12393,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);

-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
-{
-        memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
-               vcpu->arch.pio.count * vcpu->arch.pio.size);
-        vcpu->arch.pio.count = 0;
-
-        return 1;
-}
-
-static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
-                           unsigned int port, void *data, unsigned int count)
-{
-        int ret;
-
-        ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                        data, count);
-        if (ret)
-                return ret;
-
-        vcpu->arch.pio.count = 0;
-
-        return 0;
-}
-
-static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
-                          unsigned int port, void *data, unsigned int count)
-{
-        int ret;
-
-        ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
-                                       data, count);
-        if (ret) {
-                vcpu->arch.pio.count = 0;
-        } else {
-                vcpu->arch.guest_ins_data = data;
-                vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
-        }
-
-        return 0;
-}
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+                           unsigned int port);
+
+static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
+{
+        int size = vcpu->arch.pio.size;
+        int port = vcpu->arch.pio.port;
+
+        vcpu->arch.pio.count = 0;
+        if (vcpu->arch.sev_pio_count)
+                return kvm_sev_es_outs(vcpu, size, port);
+        return 1;
+}
+
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+                           unsigned int port)
+{
+        for (;;) {
+                unsigned int count =
+                        min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+                int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
+
+                /* memcpy done already by emulator_pio_out. */
+                vcpu->arch.sev_pio_count -= count;
+                vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+                if (!ret)
+                        break;
+
+                /* Emulation done by the kernel. */
+                if (!vcpu->arch.sev_pio_count)
+                        return 1;
+        }
+
+        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
+        return 0;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+                          unsigned int port);
+
+static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+        unsigned count = vcpu->arch.pio.count;
+        complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
+        vcpu->arch.sev_pio_count -= count;
+        vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+}
+
+static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+        int size = vcpu->arch.pio.size;
+        int port = vcpu->arch.pio.port;
+
+        advance_sev_es_emulated_ins(vcpu);
+        if (vcpu->arch.sev_pio_count)
+                return kvm_sev_es_ins(vcpu, size, port);
+        return 1;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+                          unsigned int port)
+{
+        for (;;) {
+                unsigned int count =
+                        min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+                if (!__emulator_pio_in(vcpu, size, port, count))
+                        break;
+
+                /* Emulation done by the kernel. */
+                advance_sev_es_emulated_ins(vcpu);
+                if (!vcpu->arch.sev_pio_count)
+                        return 1;
+        }
+
+        vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+        return 0;
+}

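The reworked kvm_sev_es_outs()/kvm_sev_es_ins() walk sev_pio_data in chunks of at most PAGE_SIZE / size elements, resuming through a completion callback when a chunk has to go out to userspace. A hedged userspace sketch of just the chunking arithmetic (PAGE_SIZE and the element sizes here are example values, not the KVM code):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Walk `total` elements of `size` bytes in chunks that fit one page,
 * mirroring the min_t(unsigned int, PAGE_SIZE / size, sev_pio_count)
 * bound used in the hunk above. */
static void walk_in_chunks(unsigned int total, unsigned int size)
{
        unsigned int remaining = total;

        while (remaining) {
                unsigned int limit = PAGE_SIZE / size;
                unsigned int count = remaining < limit ? remaining : limit;

                printf("chunk of %u elements (%u bytes)\n", count, count * size);
                remaining -= count;
        }
}

int main(void)
{
        walk_in_chunks(3000, 2);  /* 2048-element chunks, then a 952-element tail */
        return 0;
}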
@@ -12412,8 +12475,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
                          unsigned int port, void *data, unsigned int count,
                          int in)
 {
-        return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
-                  : kvm_sev_es_outs(vcpu, size, port, data, count);
+        vcpu->arch.sev_pio_data = data;
+        vcpu->arch.sev_pio_count = count;
+        return in ? kvm_sev_es_ins(vcpu, size, port)
+                  : kvm_sev_es_outs(vcpu, size, port);
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

@@ -190,6 +190,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)

 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 {
+        int err;
         u8 rc = 0;

         /*
@@ -216,13 +217,29 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
         if (likely(slots->generation == ghc->generation &&
                    !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
                 /* Fast path */
-                __get_user(rc, (u8 __user *)ghc->hva + offset);
-        } else {
-                /* Slow path */
-                kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
-                                             sizeof(rc));
+                pagefault_disable();
+                err = __get_user(rc, (u8 __user *)ghc->hva + offset);
+                pagefault_enable();
+                if (!err)
+                        return rc;
         }

+        /* Slow path */
+
+        /*
+         * This function gets called from kvm_vcpu_block() after setting the
+         * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
+         * from a HLT. So we really mustn't sleep. If the page ended up absent
+         * at that point, just return 1 in order to trigger an immediate wake,
+         * and we'll end up getting called again from a context where we *can*
+         * fault in the page and wait for it.
+         */
+        if (in_atomic() || !task_is_running(current))
+                return 1;
+
+        kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
+                                     sizeof(rc));
+
         return rc;
 }

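The __kvm_xen_has_interrupt() hunk wraps the fast-path __get_user() in pagefault_disable()/pagefault_enable() and only falls back to the sleeping slow path when the access faults and the caller is actually allowed to sleep; otherwise it conservatively reports a pending interrupt to force a wakeup. A hedged toy model of that fast-path/slow-path split in plain C (none of these helpers are the real KVM or kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Toy model:
 * - the fast path may fail but never blocks;
 * - the slow path always succeeds but may block, so it is only legal
 *   when the caller can sleep;
 * - "cannot sleep + fast path failed" conservatively reports pending (1),
 *   which at worst causes one spurious wakeup. */
static bool fast_read(unsigned char *out, bool page_resident)
{
        if (!page_resident)
                return false;   /* would have faulted; give up without blocking */
        *out = 0;               /* pretend the pending flag was clear */
        return true;
}

static unsigned char slow_read(void)
{
        return 0;               /* blocking read; always succeeds */
}

static int has_interrupt(bool page_resident, bool may_sleep)
{
        unsigned char rc;

        if (fast_read(&rc, page_resident))
                return rc;
        if (!may_sleep)
                return 1;       /* force an immediate wake; retry later */
        return slow_read();
}

int main(void)
{
        printf("%d\n", has_interrupt(true, false));   /* fast path: 0 */
        printf("%d\n", has_interrupt(false, false));  /* conservative: 1 */
        printf("%d\n", has_interrupt(false, true));   /* slow path: 0 */
        return 0;
}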