Starting points in the code:
- `static int __noclone vz_run_vcpu(struct vz_vcpu *vcpu)` (note the `__noclone` attribute)
- `kvm_arch_vcpu_create`
- the FPU/MSA context management code
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 4bd89d8d775b..0397432d8db7 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -305,6 +305,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
+ // TODO: it seems we are restoring the guest EPC here!
/* Set Guest EPC */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MTC0(&p, T0, C0_EPC);
@@ -363,6 +364,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
/* Root ASID Dealias (RAD) */
+ // TODO: set the ASID here?
/* Save host ASID */
UASM_i_MFC0(&p, K0, C0_ENTRYHI);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
@@ -462,6 +464,9 @@ static void *kvm_mips_build_enter_guest(void *addr)
return p;
}
+// TODO: compare this with `build_loongson3_tlb_refill_handler`.
+// They are almost the same, but this one has no check_for_high_segbits. Why?
+// Is it because we support a 48-bit address space here?
/**
* kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
* @addr: Address to start writing code.
@@ -572,7 +577,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*
* Returns: Next address after end of written function.
*/
-void *kvm_mips_build_exception(void *addr, void *handler)
+void *dune_mips_build_exception(void *addr, void *handler)
{
u32 *p = addr;
struct uasm_label labels[2];
@@ -674,12 +679,17 @@ void *kvm_mips_build_exit(void *addr)
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ // Why save the GPA into host_cp0_badvaddr? The guest CP0 BadVAddr
+ // holds the GVA, so keeping the two separate seems fairly reasonable.
+ //
+ // 1. TODO: the guest has a separate CP0 context; find the evidence.
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
K1);
uasm_i_mfc0(&p, K0, C0_CAUSE);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+ // TODO: what is GSCause?
uasm_i_mfc0(&p, K0, C0_GSCAUSE);
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_gscause), K1);
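To keep track of what this exit path actually saves, here is my rough C rendering of the uasm sequence above. It is only a sketch: the real code is hand-built assembly running with K1 pointing at `struct kvm_vcpu_arch`, the accessors are the usual asm/mipsregs.h ones, and the Loongson GSCause register has no generic accessor, so that line stays a comment.

```c
/* C sketch of the exit-save sequence built by kvm_mips_build_exit(). */
static void build_exit_sketch(struct kvm_vcpu_arch *arch)
{
	arch->pc = read_c0_epc();                     /* guest PC at exit */
	arch->host_cp0_badvaddr = read_c0_badvaddr(); /* GPA on a root TLB fault */
	arch->host_cp0_cause = read_c0_cause();
	/* arch->host_cp0_gscause would be saved here too; there is no
	 * generic accessor, so the real code uses uasm_i_mfc0(C0_GSCAUSE). */
}
```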
diff --git a/arch/mips/kvm/ls3acomp-vz.c b/arch/mips/kvm/ls3acomp-vz.c
index 5701010f1d4c..589bdf2f725e 100644
--- a/arch/mips/kvm/ls3acomp-vz.c
+++ b/arch/mips/kvm/ls3acomp-vz.c
@@ -2735,6 +2735,9 @@ static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
/* This will clobber guest TLB contents too */
ret = 1;
}
+ // TODO: the ASID seems to matter only for flushing.
+ // - [ ] emulate.c:kvm_mips_change_entryhi
+ // - [ ] MIPS also implements a shadow MMU
/*
* For Root ASID Dealias (RAD) we don't do anything here, but we
* still need the request to ensure we recheck asid_flush_mask.
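For reference while chasing the ASID question: the request machinery used here is the generic KVM one. A minimal sketch of the producer/consumer pattern (`kvm_make_request` / `kvm_check_request` are the real upstream helpers and `KVM_REQ_TLB_FLUSH` the real request bit; the flush callee name is my assumption):

```c
/* Producer: some path decides this vCPU's TLB view is stale. */
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

/* Consumer: checked on the way back into the guest. In the non-RAD case
 * the guest TLB is flushed here; with RAD the work is deferred to the
 * asid_flush_mask recheck mentioned in the comment above. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
	kvm_vz_local_flush_guesttlb_all();  /* assumed helper name */
```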
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f844c4135735..212280961827 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -458,6 +458,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
handler);
}
+ // TODO: what is the difference between this and the host's normal
+ // exception handler? What does it contain?
/* General exit handler */
p = handler;
p = kvm_mips_build_exit(p);
@@ -1944,6 +1946,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (ret == RESUME_GUEST)
kvm_timer_callbacks->acquire_htimer(vcpu);
+ // TODO: summarize the relation between the variables er and ret.
+ // RESUME_HOST : bail out to the host (an error, or userspace must act)
+ // RESUME_GUEST : continue running the guest
if (er == EMULATE_DONE && !(ret & RESUME_HOST))
kvm_mips_deliver_interrupts(vcpu, cause);
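My current understanding of er vs. ret, written out as a sketch. The enums are the real ones (`enum emulation_result`, `RESUME_GUEST`/`RESUME_HOST`); the mapping paraphrases the pattern repeated across the exit handlers rather than quoting any single one:

```c
/* How the per-exit emulation result typically maps onto the run-loop
 * return value in the MIPS exit handlers (paraphrase, not verbatim). */
switch (er) {
case EMULATE_DONE:
	ret = RESUME_GUEST;                         /* go back into the guest */
	break;
case EMULATE_DO_MMIO:
	run->exit_reason = KVM_EXIT_MMIO;           /* userspace completes it */
	ret = RESUME_HOST;
	break;
case EMULATE_FAIL:
default:
	run->exit_reason = KVM_EXIT_INTERNAL_ERROR; /* the "something terrible" */
	ret = RESUME_HOST;
	break;
}
```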
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index e2840cad409f..913543b014e1 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -181,6 +181,7 @@ static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache,
unsigned long addr)
{
+ // note: walks kvm->arch.gpa_mm, the software GPA => PA page tables
return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}
@@ -924,6 +925,9 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
bool write_fault,
pte_t *out_entry, pte_t *out_buddy)
{
+ // TODO: yes, this function repairs the broken gpa => pa mapping, and
+ // the resulting entry is then loaded into the TLB; otherwise the
+ // gpa => pa translation is loaded into the TLB by hardware
+ // automatically.
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
gfn_t gfn = gpa >> PAGE_SHIFT;
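Condensed sketch of my reading of kvm_mips_map_page, per the TODO above. This is a deliberate simplification, not the real body: the point is that the function only repairs the software gpa_mm page tables, and the caller is responsible for loading the resulting PTE into the root TLB.

```c
/* Hypothetical condensed form of the GPA fault path. */
static int map_page_sketch(struct kvm_vcpu *vcpu, unsigned long gpa,
			   pte_t *out_entry)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);   /* fault the backing page in */
	pte_t *ptep = kvm_mips_pte_for_gpa(kvm, &vcpu->arch.mmu_page_cache,
					   gpa); /* walk/alloc gpa_mm.pgd */

	*ptep = pfn_pte(pfn, PAGE_SHARED);      /* repair the gpa => pa map */
	*out_entry = *ptep;
	return 0;
}
```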
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 874960acdc52..988d29494602 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -1395,6 +1395,7 @@ static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
return EMULATE_FAIL;
}
+// TODO: see the Virtualization Manual, Table 5.3, "GuestCtl0 GExcCode values"
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
@@ -1411,6 +1412,10 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GSFC:
+ // 4.7.8 Guest Software Field Change Exception
+ //
+ // This exception can only be raised by a D/MTC0 instruction executed in guest mode.
+ // Changes to the following CP0 register bitfields always trigger the exception.
++vcpu->stat.vz_gsfc_exits;
er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
break;
@@ -1430,6 +1435,7 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
break;
case MIPS_GCTL0_GEXC_GHFC:
++vcpu->stat.vz_ghfc_exits;
+ // 4.7.9 Guest Hardware Field Change Exception
er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GPA:
@@ -3205,6 +3211,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.vcpu_load = kvm_vz_vcpu_load,
.vcpu_put = kvm_vz_vcpu_put,
.vcpu_run = kvm_vz_vcpu_run,
+ // TODO: what is the difference between reenter and enter?
.vcpu_reenter = kvm_vz_vcpu_reenter,
};
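On the GExcCode question above: the dispatch decodes GuestCtl0.GExcCode. A minimal sketch of the root-side decode plus a guest-side access that should raise GSFC (the accessors and `MIPS_GCTL0_*` field macros are the upstream asm/mipsregs.h ones; using Status.FR as the trigger is my own example, picked because FR is among the fields kvm_trap_vz_handle_gsfc emulates):

```c
/* Root side: extract the guest exit reason from GuestCtl0. */
u32 gexccode = (read_c0_guestctl0() & MIPS_GCTL0_GEXC)
			>> MIPS_GCTL0_GEXC_SHIFT;

/* Guest side: an MTC0 that changes a sensitive Status field (e.g. FR)
 * traps to root mode with GExcCode = GSFC instead of taking effect. */
write_c0_status(read_c0_status() ^ ST0_FR);
```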
/**
* Allocate comm page for guest kernel, a TLB will be reserved for
* mapping GVA @ 0xFFFF8000 to this page
*/
/* COP0 state is mapped into Guest kernel via commpage */
/* wired guest TLB entries */
struct kvm_mips_tlb *wired_tlb;
unsigned int wired_tlb_limit;
unsigned int wired_tlb_used;
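The three fields above are the save area for the guest's wired TLB entries (which include the reserved commpage mapping). A sketch of how that array would be sized on demand, paraphrasing the save path in vz.c; the surrounding function and the R6-only Wired-field masking are my assumptions:

```c
/* Grow the save area to cover however many wired entries the guest has. */
unsigned int wired = read_gc0_wired() & MIPSR6_WIRED_WIRED; /* R6 mask */

if (wired > vcpu->arch.wired_tlb_limit) {
	vcpu->arch.wired_tlb = krealloc(vcpu->arch.wired_tlb,
			wired * sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
	if (vcpu->arch.wired_tlb)
		vcpu->arch.wired_tlb_limit = wired;
}
vcpu->arch.wired_tlb_used = wired;
```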
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
void vcpu_load(struct kvm_vcpu *vcpu)
{
int cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);
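Usage note: vcpu_load() pairs with vcpu_put(); the ioctl paths bracket any access to per-CPU vCPU state with the two, roughly like this (the run-ioctl signature varies across kernel versions, so treat it as illustrative):

```c
vcpu_load(vcpu);
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); /* state is loaded on this CPU */
vcpu_put(vcpu);
```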
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
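These hooks are what keep vcpu_load() honest across involuntary context switches: the preempt notifier registered in vcpu_load() fires them on every reschedule of the vCPU thread. A sketch paraphrasing virt/kvm/kvm_main.c:

```c
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = container_of(pn, struct kvm_vcpu,
					     preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);  /* re-load state on the new CPU */
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = container_of(pn, struct kvm_vcpu,
					     preempt_notifier);
	kvm_arch_vcpu_put(vcpu);        /* save state before switching away */
}
```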
Remaining open questions:
- sthread / is.sthread
- `kvm_sigset_activate(vcpu);`: what should be initialized, and how?

Currently I just want to follow how Dune works, but use Loongson VZ instead of plain VZ.