Skip to content

Commit 3272518

Browse files
Maxim Levitsky authored and bonzini committed
x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation
JIRA: https://issues.redhat.com/browse/RHEL-95318 commit c560bc9 Author: Maxim Levitsky <mlevitsk@redhat.com> Date: Mon May 12 14:04:05 2025 -0400 x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation Use kvm_lock_all_vcpus instead of sev's own implementation. Because kvm_lock_all_vcpus uses the _nest_lock feature of lockdep, which ignores subclasses, there is no longer a need to use separate subclasses for source and target VMs. No functional change intended. Suggested-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Message-ID: <20250512180407.659015-5-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 5dd4e19 commit 3272518

File tree

1 file changed

+4
-68
lines changed

1 file changed

+4
-68
lines changed

arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -1907,70 +1907,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
19071907
atomic_set_release(&src_sev->migration_in_progress, 0);
19081908
}
19091909

1910-
/* vCPU mutex subclasses. */
1911-
enum sev_migration_role {
1912-
SEV_MIGRATION_SOURCE = 0,
1913-
SEV_MIGRATION_TARGET,
1914-
SEV_NR_MIGRATION_ROLES,
1915-
};
1916-
1917-
static int sev_lock_vcpus_for_migration(struct kvm *kvm,
1918-
enum sev_migration_role role)
1919-
{
1920-
struct kvm_vcpu *vcpu;
1921-
unsigned long i, j;
1922-
1923-
kvm_for_each_vcpu(i, vcpu, kvm) {
1924-
if (mutex_lock_killable_nested(&vcpu->mutex, role))
1925-
goto out_unlock;
1926-
1927-
#ifdef CONFIG_PROVE_LOCKING
1928-
if (!i)
1929-
/*
1930-
* Reset the role to one that avoids colliding with
1931-
* the role used for the first vcpu mutex.
1932-
*/
1933-
role = SEV_NR_MIGRATION_ROLES;
1934-
else
1935-
mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
1936-
#endif
1937-
}
1938-
1939-
return 0;
1940-
1941-
out_unlock:
1942-
1943-
kvm_for_each_vcpu(j, vcpu, kvm) {
1944-
if (i == j)
1945-
break;
1946-
1947-
#ifdef CONFIG_PROVE_LOCKING
1948-
if (j)
1949-
mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
1950-
#endif
1951-
1952-
mutex_unlock(&vcpu->mutex);
1953-
}
1954-
return -EINTR;
1955-
}
1956-
1957-
static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
1958-
{
1959-
struct kvm_vcpu *vcpu;
1960-
unsigned long i;
1961-
bool first = true;
1962-
1963-
kvm_for_each_vcpu(i, vcpu, kvm) {
1964-
if (first)
1965-
first = false;
1966-
else
1967-
mutex_acquire(&vcpu->mutex.dep_map,
1968-
SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
1969-
1970-
mutex_unlock(&vcpu->mutex);
1971-
}
1972-
}
1973-
19741910
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
19751911
{
19761912
struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
@@ -2111,10 +2047,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
21112047
charged = true;
21122048
}
21132049

2114-
ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
2050+
ret = kvm_lock_all_vcpus(kvm);
21152051
if (ret)
21162052
goto out_dst_cgroup;
2117-
ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
2053+
ret = kvm_lock_all_vcpus(source_kvm);
21182054
if (ret)
21192055
goto out_dst_vcpu;
21202056

@@ -2128,9 +2064,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
21282064
ret = 0;
21292065

21302066
out_source_vcpu:
2131-
sev_unlock_vcpus_for_migration(source_kvm);
2067+
kvm_unlock_all_vcpus(source_kvm);
21322068
out_dst_vcpu:
2133-
sev_unlock_vcpus_for_migration(kvm);
2069+
kvm_unlock_all_vcpus(kvm);
21342070
out_dst_cgroup:
21352071
/* Operates on the source on success, on the destination on failure. */
21362072
if (charged)

0 commit comments

Comments (0)