ANDROID: KVM: arm64: Donate memory per vcpu for each vcpu shadow state

Memory donated to the hypervisor needs to be contiguous, which
might be difficult to find. To improve the odds of finding
contiguous memory, break up vcpu state donations per vcpu.

Bug: 232070947
Signed-off-by: Fuad Tabba <tabba@google.com>
Change-Id: Iff19b2e2b6ca58b1e6ef38c4b0f16c80dae34ab9
This commit is contained in:
Fuad Tabba
2022-10-10 10:49:04 +01:00
committed by Will Deacon
parent 76094a9979
commit c0a7deb29d
4 changed files with 60 additions and 19 deletions

View File

@@ -54,8 +54,8 @@ struct kvm_shadow_vm {
struct hyp_pool pool;
hyp_spinlock_t lock;
/* Array of the shadow state per vcpu. */
struct shadow_vcpu_state shadow_vcpus[0];
/* Array of the shadow state pointers per vcpu. */
struct shadow_vcpu_state *shadow_vcpus[0];
};
static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
@@ -71,7 +71,9 @@ extern phys_addr_t pvmfw_size;
void hyp_shadow_table_init(void *tbl);
int __pkvm_init_shadow(struct kvm *kvm, void *shadow_va, size_t size, void *pgd);
int __pkvm_init_shadow_vcpu(unsigned int shadow_handle, struct kvm_vcpu *host_vcpu);
int __pkvm_init_shadow_vcpu(unsigned int shadow_handle,
struct kvm_vcpu *host_vcpu,
void *shadow_vcpu_hva);
int __pkvm_teardown_shadow(int shadow_handle);
struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx);
void put_shadow_vcpu(struct kvm_vcpu *vcpu);

View File

@@ -1010,8 +1010,11 @@ static void handle___pkvm_init_shadow_vcpu(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned int, shadow_handle, host_ctxt, 1);
DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
DECLARE_REG(void *, shadow_vcpu_hva, host_ctxt, 3);
cpu_reg(host_ctxt, 1) = __pkvm_init_shadow_vcpu(shadow_handle, host_vcpu);
cpu_reg(host_ctxt, 1) = __pkvm_init_shadow_vcpu(shadow_handle,
host_vcpu,
shadow_vcpu_hva);
}
static void handle___pkvm_teardown_shadow(struct kvm_cpu_context *host_ctxt)

View File

@@ -278,7 +278,7 @@ struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx)
vm = find_shadow_by_handle(shadow_handle);
if (!vm || vm->nr_vcpus <= vcpu_idx)
goto unlock;
vcpu = &vm->shadow_vcpus[vcpu_idx].vcpu;
vcpu = &vm->shadow_vcpus[vcpu_idx]->vcpu;
/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
if (unlikely(vcpu->arch.pkvm.loaded_on_cpu)) {
@@ -382,12 +382,12 @@ static void unpin_host_vcpu(struct shadow_vcpu_state *shadow_vcpu)
hyp_unpin_shared_mem(sve_state, sve_state + sve_state_size);
}
static void unpin_host_vcpus(struct shadow_vcpu_state *shadow_vcpus, int nr_vcpus)
static void unpin_host_vcpus(struct shadow_vcpu_state *shadow_vcpus[], int nr_vcpus)
{
int i;
for (i = 0; i < nr_vcpus; i++)
unpin_host_vcpu(&shadow_vcpus[i]);
unpin_host_vcpu(shadow_vcpus[i]);
}
static int init_ptrauth(struct kvm_vcpu *shadow_vcpu)
@@ -587,7 +587,7 @@ static size_t pkvm_get_shadow_size(int num_vcpus)
{
/* Shadow space for the vm struct and all of its vcpu states. */
return sizeof(struct kvm_shadow_vm) +
sizeof(struct shadow_vcpu_state) * num_vcpus;
sizeof(struct shadow_vcpu_state *) * num_vcpus;
}
/*
@@ -609,14 +609,14 @@ static int check_shadow_size(int nr_vcpus, size_t shadow_size)
return 0;
}
static void drain_shadow_vcpus(struct shadow_vcpu_state *shadow_vcpus,
static void drain_shadow_vcpus(struct shadow_vcpu_state *shadow_vcpus[],
unsigned int nr_vcpus,
struct kvm_hyp_memcache *mc)
{
int i;
for (i = 0; i < nr_vcpus; i++) {
struct kvm_vcpu *shadow_vcpu = &shadow_vcpus[i].vcpu;
struct kvm_vcpu *shadow_vcpu = &shadow_vcpus[i]->vcpu;
struct kvm_hyp_memcache *vcpu_mc = &shadow_vcpu->arch.pkvm_memcache;
void *addr;
@@ -727,17 +727,33 @@ err:
*
* shadow_handle: The handle for the protected vm.
* host_vcpu: A pointer to the corresponding host vcpu (host va).
* shadow_vcpu_hva: The host va of the area being donated for the vcpu state.
* Must be page aligned. The size of the area must be equal to
the page-aligned size of kvm_shadow_vcpu_state.
*
* Return 0 on success, negative error code on failure.
*/
int __pkvm_init_shadow_vcpu(unsigned int shadow_handle,
struct kvm_vcpu *host_vcpu)
struct kvm_vcpu *host_vcpu,
void *shadow_vcpu_hva)
{
struct kvm_shadow_vm *vm;
struct shadow_vcpu_state *shadow_state;
struct shadow_vcpu_state *shadow_state = kern_hyp_va(shadow_vcpu_hva);
size_t vcpu_state_sz = sizeof(*shadow_state);
u64 nr_pages = PAGE_ALIGN(vcpu_state_sz) >> PAGE_SHIFT;
unsigned int idx;
int ret;
if (!PAGE_ALIGNED(shadow_vcpu_hva))
return -EINVAL;
ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn(shadow_state),
nr_pages);
if (ret)
return ret;
memset(shadow_state, 0, vcpu_state_sz);
hyp_spin_lock(&shadow_lock);
vm = find_shadow_by_handle(shadow_handle);
@@ -752,14 +768,21 @@ int __pkvm_init_shadow_vcpu(unsigned int shadow_handle,
goto unlock;
}
shadow_state = &vm->shadow_vcpus[idx];
ret = init_shadow_vcpu(shadow_state, host_vcpu, vm, idx);
if (ret)
goto unlock;
vm->shadow_vcpus[idx] = shadow_state;
vm->nr_vcpus++;
unlock:
hyp_spin_unlock(&shadow_lock);
if (ret) {
memset(shadow_state, 0, vcpu_state_sz);
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(shadow_state),
nr_pages));
}
return ret;
}
@@ -827,6 +850,9 @@ int __pkvm_teardown_shadow(int shadow_handle)
drain_shadow_vcpus(vm->shadow_vcpus, nr_vcpus, mc);
unpin_host_vcpus(vm->shadow_vcpus, nr_vcpus);
for (i = 0; i < nr_vcpus; i++)
teardown_donated_memory(mc, vm->shadow_vcpus[i],
PAGE_ALIGN(sizeof(vm->shadow_vcpus[i])));
teardown_donated_memory(mc, vm, vm->shadow_area_size);
hyp_unpin_shared_mem(host_kvm, host_kvm + 1);

View File

@@ -118,7 +118,7 @@ void __init kvm_hyp_reserve(void)
static int __create_el2_shadow(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
size_t pgd_sz, shadow_sz;
size_t pgd_sz, shadow_sz, vcpu_state_sz;
void *pgd, *shadow_addr;
unsigned long idx;
int shadow_handle;
@@ -137,9 +137,9 @@ static int __create_el2_shadow(struct kvm *kvm)
if (!pgd)
return -ENOMEM;
/* Allocate memory to donate to hyp for the kvm and vcpu state. */
/* Allocate memory to donate to hyp for the kvm and vcpu state pointers. */
shadow_sz = PAGE_ALIGN(KVM_SHADOW_VM_SIZE +
SHADOW_VCPU_STATE_SIZE * kvm->created_vcpus);
sizeof(void *) * kvm->created_vcpus);
shadow_addr = alloc_pages_exact(shadow_sz, GFP_KERNEL_ACCOUNT);
if (!shadow_addr) {
ret = -ENOMEM;
@@ -157,18 +157,28 @@ static int __create_el2_shadow(struct kvm *kvm)
/* Store the shadow handle given by hyp for future call reference. */
kvm->arch.pkvm.shadow_handle = shadow_handle;
/* Initialize the shadow vcpus. */
/* Donate memory for the vcpu state at hyp and initialize it. */
vcpu_state_sz = PAGE_ALIGN(SHADOW_VCPU_STATE_SIZE);
kvm_for_each_vcpu (idx, vcpu, kvm) {
void *vcpu_state;
/* Indexing of the vcpus to be sequential starting at 0. */
if (WARN_ON(vcpu->vcpu_idx != idx)) {
ret = -EINVAL;
goto destroy_vm;
}
vcpu_state = alloc_pages_exact(vcpu_state_sz, GFP_KERNEL_ACCOUNT);
if (!vcpu_state) {
ret = -ENOMEM;
goto destroy_vm;
}
ret = kvm_call_hyp_nvhe(__pkvm_init_shadow_vcpu, shadow_handle,
vcpu);
if (ret)
vcpu, vcpu_state);
if (ret) {
free_pages_exact(vcpu_state, vcpu_state_sz);
goto destroy_vm;
}
}
return 0;