// SPDX-License-Identifier: GPL-2.0
/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len--;
	else
		len = 0;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *start > *stop */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
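
/*
 * Worked example for extend_address_range() above (illustration only):
 * starting from the "not set" state (*start == *stop == 0), a call with
 * estart == 0x1000 and len == 4 yields the range 0x1000-0x1003; a second
 * call with estart == 0x0fff and len == 1 then only lowers the start,
 * giving 0x0fff-0x1003.
 */
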
#define MAX_INST_SIZE 6

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/*
	 * If the host uses storage alteration for special address spaces,
	 * enable all events and give all of them to the guest.
	 */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = -1UL;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}
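
/*
 * Note (added for clarity): the backup/restore helpers above and
 * kvm_s390_patch_guest_per_regs() below are assumed to bracket the SIE
 * entry/exit path while guest debugging is active: the guest's PER-related
 * control registers (CR0, CR9-CR11) are saved, overwritten with the combined
 * host and guest debugging needs, and put back once the interception has
 * been handled.
 */
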
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: if guest psw has per enabled, otherwise 0s!
	 * This reduces the amount of reported events.
	 * Need to intercept all psw changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = -1UL;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
			     wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50
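
/*
 * Import the hardware breakpoints and watchpoints supplied by userspace,
 * presumably via the KVM_SET_GUEST_DEBUG ioctl path: dbg->arch.hw_bp points
 * to a user-space array of struct kvm_hw_breakpoint that is copied once and
 * then split into separate breakpoint and watchpoint lists hanging off
 * vcpu->arch.guestdbg.
 */
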
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	bp_data = memdup_user(dbg->arch.hw_bp,
			      sizeof(*bp_data) * dbg->arch.nr_hw_bp);
	if (IS_ERR(bp_data))
		return PTR_ERR(bp_data);

	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	if (nr_wp > 0) {
		wp_info = kmalloc_array(nr_wp, sizeof(*wp_info), GFP_KERNEL);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	if (nr_bp > 0) {
		bp_info = kmalloc_array(nr_bp, sizeof(*bp_info), GFP_KERNEL);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	/* the temporary copy of the user array is no longer needed */
	kfree(bp_data);
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}

static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr >= a) || (addr <= b);
}

#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}

static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
				    wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
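
/*
 * The PER_EVENT_* masks sit in the 0xff000000 byte of CR9; shifting them
 * right by 24 maps each event-mask bit onto the corresponding bit of the
 * 8-bit PER code. The PER_CODE_* values below are matched against the SIE
 * block's "perc" field, which is assumed to report the event in that format.
 */
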
#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)

#define per_bp_event(code) \
		(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
		(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))

static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
			       unsigned long peraddr)
{
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}

static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
{
	u8 exec_ilen = 0;
	u16 opcode[3];
	int rc;

	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
		/* PER address references the fetched or the execute instr */
		*addr = vcpu->arch.sie_block->peraddr;
		/*
		 * Manually detect if we have an EXECUTE instruction. As
		 * instructions are always 2 byte aligned we can read the
		 * first two bytes unconditionally
		 */
		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
		if (rc)
			return rc;
		if (opcode[0] >> 8 == 0x44)
			exec_ilen = 4;
		if ((opcode[0] & 0xff0f) == 0xc600)
			exec_ilen = 6;
	} else {
		/* instr was suppressed, calculate the responsible instr */
		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
				     kvm_s390_get_ilen(vcpu));
		if (vcpu->arch.sie_block->icptstatus & 0x01) {
			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
			if (!exec_ilen)
				exec_ilen = 4;
		}
	}

	if (exec_ilen) {
		/* read the complete EXECUTE instr to detect the fetched addr */
		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
		if (rc)
			return rc;
		if (exec_ilen == 6) {
			/* EXECUTE RELATIVE LONG - RIL-b format */
			s32 rl = *((s32 *) (opcode + 1));

			/* rl is a _signed_ 32 bit value specifying halfwords */
			*addr += (u64)(s64) rl * 2;
		} else {
			/* EXECUTE - RX-a format */
			u32 base = (opcode[1] & 0xf000) >> 12;
			u32 disp = opcode[1] & 0x0fff;
			u32 index = opcode[0] & 0x000f;

			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
			*addr += disp;
		}
		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
	}
	return 0;
}

#define guest_per_enabled(vcpu) \
			     (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
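
/*
 * Handle the intercept of an instruction that also triggered a PER
 * instruction-fetch event: depending on who requested the event, flag a
 * debug exit to userspace, inject a PER program interrupt into the guest,
 * or both.
 */
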
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
	const u8 ilen = kvm_s390_get_ilen(vcpu);
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_PER,
		.per_code = PER_CODE_IFETCH,
		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
	};
	unsigned long fetched_addr;
	int rc;

	/*
	 * The PSW points to the next instruction, therefore the intercepted
	 * instruction generated a PER i-fetch event. PER address therefore
	 * points at the previous PSW address (could be an EXECUTE function).
	 */
	if (!guestdbg_enabled(vcpu))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);

	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	if (!guest_per_enabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
		return 0;

	rc = per_fetched_addr(vcpu, &fetched_addr);
	if (rc < 0)
		return rc;
	if (rc)
		/* instruction-fetching exceptions */
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (in_addr_range(fetched_addr, cr10, cr11))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
	return 0;
}

static int filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	const u8 perc = vcpu->arch.sie_block->perc;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* keep only the events demanded by the guest */
	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
	unsigned long fetched_addr;
	int rc;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_CODE_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_CODE_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_CODE_IFETCH) {
		rc = per_fetched_addr(vcpu, &fetched_addr);
		if (rc < 0)
			return rc;
		/*
		 * Don't inject an irq on exceptions. This would make handling
		 * on icpt code 8 very complex (as PSW was already rewound).
		 */
		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
			guest_perc &= ~PER_CODE_IFETCH;
	}

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
	return 0;
}

#define pssec(vcpu)		(vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu)		(vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu)		((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu)	!(vcpu->arch.sie_block->tecmc & 0xffff)
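
/*
 * Central PER-intercept handler (description of the flow below): first
 * decide whether the host debugger requires an exit to userspace, then
 * filter the PER event against the guest's own PER configuration, and
 * finally re-evaluate a space-switch event that was only reported because
 * PER was active, suppressing it unless the guest's space-switch-event
 * controls ask for it.
 */
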
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	int rc, new_as;

	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
				vcpu->arch.sie_block->peraddr))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	rc = filter_guest_per_event(vcpu);
	if (rc)
		return rc;

	/*
	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
	 * a space-switch event. PER events enforce space-switch events
	 * for these instructions. So if no PER event for the guest is left,
	 * we might have to filter the space-switch element out, too.
	 */
	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
		vcpu->arch.sie_block->iprcc = 0;
		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;

		/*
		 * If the AS changed from / to home, we had an RP, SAC or SACF
		 * instruction. Check the primary and home space-switch-event
		 * controls. (theoretically home -> home produced no event)
		 */
		if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
		    (pssec(vcpu) || hssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;

		/*
		 * PT, PTI, PR, PC instructions operate on the primary AS only.
		 * Check if the primary-space-switch-event control was or got
		 * set.
		 */
		if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
		    (pssec(vcpu) || old_ssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
	}
	return 0;
}