--- kvm-s390.c (f95937ccf5bd5e0a6bbac2b8e65a87982ffae403)
+++ kvm-s390.c (1e753732bda6dcf888ea0b90b2a91ac1c1a0bae9)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * hosting IBM Z kernel virtual machines (s390x)
  *
  * Copyright IBM Corp. 2008, 2020
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
--- 52 unchanged lines hidden ---
 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
 	KVM_GENERIC_VM_STATS(),
 	STATS_DESC_COUNTER(VM, inject_io),
 	STATS_DESC_COUNTER(VM, inject_float_mchk),
 	STATS_DESC_COUNTER(VM, inject_pfault_done),
 	STATS_DESC_COUNTER(VM, inject_service_signal),
 	STATS_DESC_COUNTER(VM, inject_virtio)
 };
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+	      sizeof(struct kvm_vm_stat) / sizeof(u64));

 const struct kvm_stats_header kvm_vm_stats_header = {
 	.name_size = KVM_STATS_NAME_SIZE,
 	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
 	.id_offset = sizeof(struct kvm_stats_header),
 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
 		       sizeof(kvm_vm_stats_desc),
--- 90 unchanged lines hidden ---
 	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
 	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
 	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
 	STATS_DESC_COUNTER(VCPU, pfault_sync)
 };
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+	      sizeof(struct kvm_vcpu_stat) / sizeof(u64));
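
The two static_assert additions above enforce that each stats descriptor table has exactly one entry per u64 counter in the corresponding stat structure, so a counter added without a matching descriptor (or the other way round) fails at compile time instead of silently skewing the exported stats layout. A minimal, self-contained sketch of the same pattern, using hypothetical demo_* names rather than the kernel's types:

	#include <assert.h>
	#include <stdint.h>

	#define DEMO_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

	/* One u64 counter per statistic, loosely mirroring struct kvm_vm_stat. */
	struct demo_vm_stat {
		uint64_t inject_io;
		uint64_t inject_virtio;
	};

	/* One descriptor per counter, loosely mirroring kvm_vm_stats_desc[]. */
	static const char * const demo_vm_stats_desc[] = {
		"inject_io",
		"inject_virtio",
	};

	/* Compile-time check: descriptor count must equal counter count. */
	static_assert(DEMO_ARRAY_SIZE(demo_vm_stats_desc) ==
		      sizeof(struct demo_vm_stat) / sizeof(uint64_t),
		      "one descriptor per 64-bit counter");

	int main(void)
	{
		return 0;
	}
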

 const struct kvm_stats_header kvm_vcpu_stats_header = {
 	.name_size = KVM_STATS_NAME_SIZE,
 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
 	.id_offset = sizeof(struct kvm_stats_header),
 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
 		       sizeof(kvm_vcpu_stats_desc),
--- 1761 unchanged lines hidden ---
 /*
  * Similar to gfn_to_memslot, but returns the index of a memslot also when the
  * address falls in a hole. In that case the index of one of the memslots
  * bordering the hole is returned.
  */
 static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
 {
 	int start = 0, end = slots->used_slots;
-	int slot = atomic_read(&slots->last_used_slot);
+	int slot = atomic_read(&slots->lru_slot);
 	struct kvm_memory_slot *memslots = slots->memslots;

 	if (gfn >= memslots[slot].base_gfn &&
 	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
 		return slot;

 	while (start < end) {
 		slot = start + (end - start) / 2;
--- 4 unchanged lines hidden ---
 			start = slot + 1;
 	}

 	if (start >= slots->used_slots)
 		return slots->used_slots - 1;

 	if (gfn >= memslots[start].base_gfn &&
 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
-		atomic_set(&slots->last_used_slot, start);
+		atomic_set(&slots->lru_slot, start);
 	}

 	return start;
 }

 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
 			      u8 *res, unsigned long bufsize)
 {
--- 639 unchanged lines hidden ---
 	cpuid.version = 0xff;
 	return *((u64 *) &cpuid);
 }

 static void kvm_s390_crypto_init(struct kvm *kvm)
 {
 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
 	kvm_s390_set_crycb_format(kvm);
+	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

 	if (!test_kvm_facility(kvm, 76))
 		return;

 	/* Enable AES/DEA protected key functions by default */
 	kvm->arch.crypto.aes_kw = 1;
 	kvm->arch.crypto.dea_kw = 1;
 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
--- 2462 unchanged lines hidden ---
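
A note on the unchanged gfn_to_memslot_approx() context shown above: as its comment says, it is a binary search over the memslot array that, unlike gfn_to_memslot(), still returns the index of a bordering slot when the guest frame number falls into a hole between slots, which suits the CMMA migration helpers that follow it. Below is a stand-alone sketch of that "approximate" lookup over a simplified slot array sorted by ascending base_gfn; the demo_* names are hypothetical and the kernel's own lookup differs in detail (sort order, and the last-used-slot cache read via atomic_read() at the top of the function).

	#include <stdio.h>

	struct demo_slot {
		unsigned long base_gfn;
		unsigned long npages;
	};

	/*
	 * Return the index of the slot containing gfn, or of a slot bordering
	 * the hole that gfn falls into. Slots are sorted by ascending
	 * base_gfn and do not overlap.
	 */
	static int demo_slot_index_approx(const struct demo_slot *s, int used,
					  unsigned long gfn)
	{
		int lo = 0, hi = used;

		while (lo < hi) {
			int mid = lo + (hi - lo) / 2;

			if (gfn < s[mid].base_gfn + s[mid].npages)
				hi = mid;	/* gfn is in or before slot mid */
			else
				lo = mid + 1;	/* gfn lies past slot mid */
		}
		/* Clamp so an address past the last slot still maps to a slot. */
		return lo < used ? lo : used - 1;
	}

	int main(void)
	{
		const struct demo_slot slots[] = {
			{ .base_gfn = 0x000, .npages = 0x100 },
			{ .base_gfn = 0x400, .npages = 0x100 },	/* hole before this one */
		};

		printf("%d\n", demo_slot_index_approx(slots, 2, 0x050)); /* inside slot 0 -> 0 */
		printf("%d\n", demo_slot_index_approx(slots, 2, 0x200)); /* in the hole   -> 1 */
		printf("%d\n", demo_slot_index_approx(slots, 2, 0x900)); /* past the end  -> 1 */
		return 0;
	}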