//===- LiveIntervals.cpp - Live Interval Analysis -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the LiveInterval analysis pass which is used
/// by the Linear Scan Register allocator. This pass linearizes the
/// basic blocks of the function in DFS order and computes live intervals for
/// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

#ifndef NDEBUG
static cl::opt<bool> EnablePrecomputePhysRegs(
  "precompute-phys-liveness", cl::Hidden,
  cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG

namespace llvm {

cl::opt<bool> UseSegmentSetForPhysRegs(
    "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
    cl::desc(
        "Use segment set for the computation of the live ranges of physregs."));

} // end namespace llvm

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

LiveIntervals::LiveIntervals() : MachineFunctionPass(ID) {
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}

LiveIntervals::~LiveIntervals() {
  delete LRCalc;
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
    delete VirtRegIntervals[Register::index2VirtReg(i)];
  VirtRegIntervals.clear();
  RegMaskSlots.clear();
  RegMaskBits.clear();
  RegMaskBlocks.clear();

  for (LiveRange *LR : RegUnitRanges)
    delete LR;
  RegUnitRanges.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
}

bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  MF = &fn;
  MRI = &MF->getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();

  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  computeVirtRegs();
  computeRegMasks();
  computeLiveInRegUnits();

  if (EnablePrecomputePhysRegs) {
    // For stress testing, precompute live ranges of all physical register
    // units, including reserved registers.
    for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
      getRegUnit(i);
  }
  LLVM_DEBUG(dump());
  return true;
}

void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";

  // Dump the regunits.
  for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit)
    if (LiveRange *LR = RegUnitRanges[Unit])
      OS << printRegUnit(Unit, TRI) << ' ' << *LR << '\n';

  // Dump the virtregs.
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (hasInterval(Reg))
      OS << getInterval(Reg) << '\n';
  }

  OS << "RegMasks:";
  for (SlotIndex Idx : RegMaskSlots)
    OS << ' ' << Idx;
  OS << '\n';

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = Register::isPhysicalRegister(reg) ? huge_valf : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// Compute the live interval of a virtual register, based on defs and uses.
bool LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI.empty() && "Should only compute empty intervals.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
  return computeDeadValues(LI, nullptr);
}

void LiveIntervals::computeVirtRegs() {
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    LiveInterval &LI = createEmptyInterval(Reg);
    bool NeedSplit = computeVirtRegInterval(LI);
    if (NeedSplit) {
      SmallVector<LiveInterval*, 8> SplitLIs;
      splitSeparateComponents(LI, SplitLIs);
    }
  }
}

void LiveIntervals::computeRegMasks() {
  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Find all instructions with regmask operands.
  for (const MachineBasicBlock &MBB : *MF) {
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
    RMB.first = RegMaskSlots.size();

    // Some block starts, such as EH funclets, create masks.
    if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    for (const MachineInstr &MI : MBB) {
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isRegMask())
          continue;
        RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
        RegMaskBits.push_back(MO.getRegMask());
      }
    }

    // Some block ends, such as funclet returns, create masks. Put the mask on
    // the last instruction of the block, because MBB slot index intervals are
    // half-open.
    if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
      assert(!MBB.empty() && "empty return block?");
      RegMaskSlots.push_back(
          Indexes->getInstructionIndex(MBB.back()).getRegSlot());
      RegMaskBits.push_back(Mask);
    }

    // Compute the number of register mask instructions in this block.
    RMB.second = RegMaskSlots.size() - RMB.first;
  }
}
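
// Illustrative sketch (hypothetical helper, not part of this pass): the
// per-block bookkeeping above makes it cheap to enumerate the regmask slots
// of a single block, because RegMaskBlocks[N] records the (first index, count)
// pair into the parallel RegMaskSlots/RegMaskBits arrays:
//
//   void printRegMasksInBlock(const LiveIntervals &LIS, unsigned MBBNum) {
//     ArrayRef<SlotIndex> Slots = LIS.getRegMaskSlotsInBlock(MBBNum);
//     ArrayRef<const uint32_t *> Bits = LIS.getRegMaskBitsInBlock(MBBNum);
//     assert(Slots.size() == Bits.size() && "parallel arrays");
//     for (unsigned I = 0, E = Slots.size(); I != E; ++I)
//       dbgs() << "regmask at " << Slots[I] << '\n';
//   }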

//===----------------------------------------------------------------------===//
//                           Register Unit Liveness
//===----------------------------------------------------------------------===//
//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// interference.
//

/// Compute the live range of a register unit, based on the uses and defs of
/// aliasing registers.  The range should be empty, or contain only dead
/// phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  bool IsReserved = false;
  for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
    bool IsRootReserved = true;
    for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
         Super.isValid(); ++Super) {
      unsigned Reg = *Super;
      if (!MRI->reg_empty(Reg))
        LRCalc->createDeadDefs(LR, Reg);
      // A register unit is considered reserved if all its roots and all their
      // super registers are reserved.
      if (!MRI->isReserved(Reg))
        IsRootReserved = false;
    }
    IsReserved |= IsRootReserved;
  }
  assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
         "reserved computation mismatch");

  // Now extend LR to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  if (!IsReserved) {
    for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
      for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
           Super.isValid(); ++Super) {
        unsigned Reg = *Super;
        if (!MRI->reg_empty(Reg))
          LRCalc->extendToUses(LR, Reg);
      }
    }
  }

  // Flush the segment set to the segment vector.
  if (UseSegmentSetForPhysRegs)
    LR.flushSegmentSet();
}
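
// Illustrative sketch (hypothetical caller, not how the allocators are
// actually wired up): once computed, the cached regunit ranges give a simple
// way to test whether a physical register interferes with a virtual register
// interval:
//
//   bool Interferes = false;
//   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
//     if (LIS->getRegUnit(*Units).overlaps(VirtRegLI))
//       Interferes = true;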

/// Precompute the live ranges of any register units that are live-in to an ABI
/// block somewhere. Register values can appear without a corresponding def when
/// entering the entry block or a landing pad.
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitRanges.resize(TRI->getNumRegUnits());
  LLVM_DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the live range sets allocated.
  SmallVector<unsigned, 8> NewRanges;

  // Check all basic blocks for live-ins.
  for (const MachineBasicBlock &MBB : *MF) {
    // We only care about ABI blocks: Entry + landing pads.
    if ((&MBB != &MF->front() && !MBB.isEHPad()) || MBB.livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
    LLVM_DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
    for (const auto &LI : MBB.liveins()) {
      for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveRange *LR = RegUnitRanges[Unit];
        if (!LR) {
          // Use the segment set to speed up the initial computation of the
          // live range.
          LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
          NewRanges.push_back(Unit);
        }
        VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        LLVM_DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    LLVM_DEBUG(dbgs() << '\n');
  }
  LLVM_DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");

  // Compute the 'normal' part of the ranges.
  for (unsigned Unit : NewRanges)
    computeRegUnitRange(*RegUnitRanges[Unit], Unit);
}

static void createSegmentsForValues(LiveRange &LR,
    iterator_range<LiveInterval::vni_iterator> VNIs) {
  for (VNInfo *VNI : VNIs) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
  }
}

void LiveIntervals::extendSegmentsToUses(LiveRange &Segments,
                                         ShrinkToUsesWorkList &WorkList,
                                         unsigned Reg, LaneBitmask LaneMask) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;

  auto getSubRange = [](const LiveInterval &I, LaneBitmask M)
        -> const LiveRange& {
    if (M.none())
      return I;
    for (const LiveInterval::SubRange &SR : I.subranges()) {
      if ((SR.LaneMask & M).any()) {
        assert(SR.LaneMask == M && "Expecting lane masks to match exactly");
        return SR;
      }
    }
    llvm_unreachable("Subrange for mask not found");
  };

  const LiveInterval &LI = getInterval(Reg);
  const LiveRange &OldRange = getSubRange(LI, LaneMask);

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    const MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes->getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = Segments.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (const MachineBasicBlock *Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes->getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    LLVM_DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    Segments.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes->getMBBEndIdx(Pred);
      if (VNInfo *OldVNI = OldRange.getVNInfoBefore(Stop)) {
        assert(OldVNI == VNI && "Wrong value out of predecessor");
        (void)OldVNI;
        WorkList.push_back(std::make_pair(Stop, VNI));
      } else {
#ifndef NDEBUG
        // There was no old VNI. Verify that Stop is jointly dominated
        // by <undef>s for this live range.
        assert(LaneMask.any() &&
               "Missing value out of predecessor for main range");
        SmallVector<SlotIndex,8> Undefs;
        LI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
        assert(LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes) &&
               "Missing value out of predecessor for subrange");
#endif
      }
    }
  }
}

bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  LLVM_DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(Register::isVirtualRegister(li->reg) &&
         "Can only shrink virtual registers");

  // Shrink subregister live ranges.
  bool NeedsCleanup = false;
  for (LiveInterval::SubRange &S : li->subranges()) {
    shrinkToUses(S, li->reg);
    if (S.empty())
      NeedsCleanup = true;
  }
  if (NeedsCleanup)
    li->removeEmptySubRanges();

  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading li->reg.
  unsigned Reg = li->reg;
  for (MachineInstr &UseMI : MRI->reg_instructions(Reg)) {
    if (UseMI.isDebugValue() || !UseMI.readsVirtualRegister(Reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      LLVM_DEBUG(
          dbgs() << Idx << '\t' << UseMI
                 << "Warning: Instr claims to read non-existent value in "
                 << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
  extendSegmentsToUses(NewLR, WorkList, Reg, LaneBitmask::getNone());

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);

  // Handle dead values.
  bool CanSeparate = computeDeadValues(*li, dead);
  LLVM_DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
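
// Usage sketch (hypothetical caller): after deleting or rewriting uses of a
// virtual register, callers typically shrink its interval and clean up any
// instructions whose defs became fully dead:
//
//   SmallVector<MachineInstr *, 8> DeadInsts;
//   SmallVector<LiveInterval *, 8> SplitLIs;
//   if (LIS->shrinkToUses(&LI, &DeadInsts))
//     LIS->splitSeparateComponents(LI, SplitLIs); // LI may have split apart
//   for (MachineInstr *MI : DeadInsts) {
//     LIS->RemoveMachineInstrFromMaps(*MI);
//     MI->eraseFromParent();
//   }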

bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool MayHaveSplitComponents = false;
  bool HaveDeadDef = false;

  for (VNInfo *VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    unsigned VReg = LI.reg;
    if (MRI->shouldTrackSubRegLiveness(VReg)) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->setRegisterDefReadUndef(VReg);
      }
    }

    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      LLVM_DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      MayHaveSplitComponents = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(LI.reg, TRI);
      if (HaveDeadDef)
        MayHaveSplitComponents = true;
      HaveDeadDef = true;

      if (dead && MI->allDefsAreDead()) {
        LLVM_DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return MayHaveSplitComponents;
}

void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
  LLVM_DEBUG(dbgs() << "Shrink: " << SR << '\n');
  assert(Register::isVirtualRegister(Reg) &&
         "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading Reg.
  SlotIndex LastIdx;
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Skip "undef" uses.
    if (!MO.readsReg())
      continue;
    // Maybe the operand is for a subregister we don't care about.
    unsigned SubReg = MO.getSubReg();
    if (SubReg != 0) {
      LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((LaneMask & SR.LaneMask).none())
        continue;
    }
    // We only need to visit each instruction once.
    MachineInstr *UseMI = MO.getParent();
    SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
    if (Idx == LastIdx)
      continue;
    LastIdx = Idx;

    LiveQueryResult LRQ = SR.Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    // For subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real live range at the use.
    if (!VNI)
      continue;

    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live range with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
  extendSegmentsToUses(NewLR, WorkList, Reg, SR.LaneMask);

  // Move the trimmed ranges back.
  SR.segments.swap(NewLR.segments);

  // Remove dead PHI value numbers
  for (VNInfo *VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
    assert(Segment != nullptr && "Missing segment for VNI");
    if (Segment->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      LLVM_DEBUG(dbgs() << "Dead PHI at " << VNI->def
                        << " may separate interval\n");
      VNI->markUnused();
      SR.removeSegment(*Segment);
    }
  }

  LLVM_DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}

void LiveIntervals::extendToIndices(LiveRange &LR,
                                    ArrayRef<SlotIndex> Indices,
                                    ArrayRef<SlotIndex> Undefs) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  for (SlotIndex Idx : Indices)
    LRCalc->extend(LR, Idx, /*PhysReg=*/0, Undefs);
}

void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveQueryResult LRQ = LR.Query(Kill);
  VNInfo *VNI = LRQ.valueOutOrDead();
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LR.removeSegment(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LR.removeSegment(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  using VisitedTy = df_iterator_default_set<MachineBasicBlock*,9>;
  VisitedTy Visited;
  for (MachineBasicBlock *Succ : KillMBB->successors()) {
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(Succ, Visited), E = df_ext_end(Succ, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      SlotIndex MBBStart, MBBEnd;
      std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveQueryResult LRQ = LR.Query(MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI segment. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LR.removeSegment(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LR.removeSegment(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}
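
// Usage sketch (hypothetical caller): pruneValue() and extendToIndices() are
// designed to be used together when a value is rewritten. First prune the old
// value and remember where liveness was cut, then re-extend the range to the
// recorded end points once the replacement value is in place:
//
//   SmallVector<SlotIndex, 8> EndPoints;
//   LIS->pruneValue(LR, Kill, &EndPoints);
//   // ... rewrite the code so a new (or different) value is available ...
//   LIS->extendToIndices(LR, EndPoints);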

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
  // Keep track of subregister ranges.
  SmallVector<std::pair<const LiveInterval::SubRange*,
                        LiveRange::const_iterator>, 4> SRs;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = Register::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = getInterval(Reg);
    if (LI.empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Unit(VRM->getPhys(Reg), TRI); Unit.isValid();
         ++Unit) {
      const LiveRange &RURange = getRegUnit(*Unit);
      if (RURange.empty())
        continue;
      RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
    }

    if (MRI->subRegLivenessEnabled()) {
      SRs.clear();
      for (const LiveInterval::SubRange &SR : LI.subranges()) {
        SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
      }
    }

    // Every instruction that kills Reg corresponds to a segment range end
    // point.
    for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %eax = COPY %5
      //   FOO %5             <--- MI, cancel kill because %eax is live.
      //   BAR killed %eax
      //
      // There should be no kill flag on FOO when %5 is rewritten as %eax.
      for (auto &RUP : RU) {
        const LiveRange &RURange = *RUP.first;
        LiveRange::const_iterator &I = RUP.second;
        if (I == RURange.end())
          continue;
        I = RURange.advanceTo(I, RI->end);
        if (I == RURange.end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        goto CancelKill;
      }

      if (MRI->subRegLivenessEnabled()) {
        // When reading a partial undefined value we must not add a kill flag.
        // The regalloc might have used the undef lane for something else.
        // Example:
        //     %1 = ...                  ; R32: %1
        //     %2:high16 = ...           ; R64: %2
        //        = read killed %2        ; R64: %2
        //        = read %1              ; R32: %1
        // The <kill> flag is correct for %2, but the register allocator may
        // assign R0L to %1, and R0 to %2 because the low 32bits of R0
        // are actually never written by %2. After assignment the <kill>
        // flag at the read instruction is invalid.
        LaneBitmask DefinedLanesMask;
        if (!SRs.empty()) {
          // Compute a mask of lanes that are defined.
          DefinedLanesMask = LaneBitmask::getNone();
          for (auto &SRP : SRs) {
            const LiveInterval::SubRange &SR = *SRP.first;
            LiveRange::const_iterator &I = SRP.second;
            if (I == SR.end())
              continue;
            I = SR.advanceTo(I, RI->end);
            if (I == SR.end() || I->start >= RI->end)
              continue;
            // I is overlapping RI
            DefinedLanesMask |= SR.LaneMask;
          }
        } else
          DefinedLanesMask = LaneBitmask::getAll();

        bool IsFullWrite = false;
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg() || MO.getReg() != Reg)
            continue;
          if (MO.isUse()) {
            // Reading any undefined lanes?
            LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
            if ((UseMask & ~DefinedLanesMask).any())
              goto CancelKill;
          } else if (MO.getSubReg() == 0) {
            // Writing to the full register?
            assert(MO.isDef());
            IsFullWrite = true;
          }
        }

        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this is only overriding part of the register
        // adding kill-flags is not correct here after registers have been
        // assigned.
        if (!IsFullWrite) {
          // Next segment has to be adjacent in the subregister write case.
          LiveRange::const_iterator N = std::next(RI);
          if (N != LI.end() && N->start == RI->end)
            goto CancelKill;
        }
      }

      MI->addRegisterKilled(Reg, nullptr);
      continue;
CancelKill:
      MI->clearRegisterKills(Reg, nullptr);
    }
  }
}

MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
  // A local live range must be fully contained inside the block, meaning it is
  // defined and killed at instructions, not at block boundaries. It is not
  // live in or out of any block.
  //
  // It is technically possible to have a PHI-defined live range identical to a
  // single block, but we are going to return false in that case.

  SlotIndex Start = LI.beginIndex();
  if (Start.isBlock())
    return nullptr;

  SlotIndex Stop = LI.endIndex();
  if (Stop.isBlock())
    return nullptr;

  // getMBBFromIndex doesn't need to search the MBB table when both indexes
  // belong to proper instructions.
  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
  return MBB1 == MBB2 ? MBB1 : nullptr;
}

bool
LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
  for (const VNInfo *PHI : LI.valnos) {
    if (PHI->isUnused() || !PHI->isPHIDef())
      continue;
    const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
    // Conservatively return true instead of scanning huge predecessor lists.
    if (PHIMBB->pred_size() > 100)
      return true;
    for (const MachineBasicBlock *Pred : PHIMBB->predecessors())
      if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(Pred)))
        return true;
  }
  return false;
}

float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineInstr &MI) {
  return getSpillWeight(isDef, isUse, MBFI, MI.getParent());
}

float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
                                    const MachineBlockFrequencyInfo *MBFI,
                                    const MachineBasicBlock *MBB) {
  BlockFrequency Freq = MBFI->getBlockFreq(MBB);
  const float Scale = 1.0f / MBFI->getEntryFreq();
  return (isDef + isUse) * (Freq.getFrequency() * Scale);
}
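
// Illustrative example of the weight formula above: for a block whose
// frequency is 3x the entry block frequency, an instruction that both reads
// and writes the register contributes
//   (isDef + isUse) * (Freq * 1/EntryFreq) = (1 + 1) * 3.0 = 6.0
// to the register's spill weight, so hot blocks make spilling look more
// expensive.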

LiveRange::Segment
LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
  LiveInterval& Interval = createEmptyInterval(reg);
  VNInfo *VN = Interval.getNextValue(
      SlotIndex(getInstructionIndex(startInst).getRegSlot()),
      getVNInfoAllocator());
  LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
                       getMBBEndIdx(startInst.getParent()), VN);
  Interval.addSegment(S);

  return S;
}

//===----------------------------------------------------------------------===//
//                          Register mask functions
//===----------------------------------------------------------------------===//

bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI = llvm::lower_bound(Slots, LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  while (true) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}
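
// Usage sketch (hypothetical caller, roughly what a register allocator does):
// compute the set of physical registers still usable for LI after removing
// everything clobbered by regmasks (typically calls) inside its live range:
//
//   BitVector Usable;
//   if (LIS->checkRegMaskInterference(LI, Usable)) {
//     // LI crosses at least one regmask; Usable[PhysReg] is false for every
//     // register clobbered by one of those masks.
//   }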

//===----------------------------------------------------------------------===//
//                         IntervalUpdate class.
//===----------------------------------------------------------------------===//

/// Toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  SlotIndex OldIdx;
  SlotIndex NewIdx;
  SmallPtrSet<LiveRange*, 8> Updated;
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
      UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
  // flags, and postRA passes will use a live register utility instead.
  LiveRange *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    LLVM_DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": "
                      << *MI);
    bool hasRegMask = false;
    for (MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        hasRegMask = true;
      if (!MO.isReg())
        continue;
      if (MO.isUse()) {
        if (!MO.readsReg())
          continue;
        // Aggressively clear all kill flags.
        // They are reinserted by VirtRegRewriter.
        MO.setIsKill(false);
      }

      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      if (Register::isVirtualRegister(Reg)) {
        LiveInterval &LI = LIS.getInterval(Reg);
        if (LI.hasSubRanges()) {
          unsigned SubReg = MO.getSubReg();
          LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
                                        : MRI.getMaxLaneMaskForVReg(Reg);
          for (LiveInterval::SubRange &S : LI.subranges()) {
            if ((S.LaneMask & LaneMask).none())
              continue;
            updateRange(S, Reg, S.LaneMask);
          }
        }
        updateRange(LI, Reg, LaneBitmask::getNone());
        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveRange *LR = getRegUnitLI(*Units))
          updateRange(*LR, *Units, LaneBitmask::getNone());
    }
    if (hasRegMask)
      updateRegMaskSlots();
  }
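
  // Usage sketch (hypothetical caller, e.g. a pre-RA scheduler): the
  // instruction is physically moved first, then LiveIntervals::handleMove()
  // re-indexes it and uses this editor to repair every affected live range:
  //
  //   MBB->splice(InsertPos, MBB, MI);            // move MI within its block
  //   LIS->handleMove(*MI, /*UpdateFlags=*/true); // ends up in updateAllRanges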

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    if (!Updated.insert(&LR).second)
      return;
    LLVM_DEBUG({
      dbgs() << "     ";
      if (Register::isVirtualRegister(Reg)) {
        dbgs() << printReg(Reg);
        if (LaneMask.any())
          dbgs() << " L" << PrintLaneMask(LaneMask);
      } else {
        dbgs() << printRegUnit(Reg, &TRI);
      }
      dbgs() << ":\t" << LR << '\n';
    });
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LR);
    else
      handleMoveUp(LR, Reg, LaneMask);
    LLVM_DEBUG(dbgs() << "        -->\t" << LR << '\n');
    LR.verify();
  }

  /// Update LR to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx (OldIdx < NewIdx).
  void handleMoveDown(LiveRange &LR) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist, they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
        for (MachineOperand &MOP : mi_bundle_ops(*KillMI))
          if (MOP.isReg() && MOP.isUse())
            MOP.setIsKill(false);

      // Is there a def before NewIdx which is not OldIdx?
      LiveRange::iterator Next = std::next(OldIdxIn);
      if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
          SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
        // If we are here then OldIdx was just a use but not a def. We only have
        // to ensure liveness extends to NewIdx.
        LiveRange::iterator NewIdxIn =
          LR.advanceTo(Next, NewIdx.getBaseIndex());
        // Extend the segment before NewIdx if necessary.
        if (NewIdxIn == E ||
            !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
          LiveRange::iterator Prev = std::prev(NewIdxIn);
          Prev->end = NewIdx.getRegSlot();
        }
        // Extend OldIdxIn.
        OldIdxIn->end = Next->start;
        return;
      }

      // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
      // invalid by overlapping ranges.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
      // If this was not a kill, then there was no def and we're done.
      if (!isKill)
        return;

      // Did we have a Def at OldIdx?
      OldIdxOut = Next;
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");

    // If the defined value extends beyond NewIdx, just move the beginning
    // of the segment to NewIdx.
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
      OldIdxVNI->def = NewIdxDef;
      OldIdxOut->start = OldIdxVNI->def;
      return;
    }

    // If we are here then we have a Definition at OldIdx which ends before
    // NewIdx.

    // Is there an existing Def at NewIdx?
    LiveRange::iterator AfterNewIdx
      = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();
    if (!OldIdxDefIsDead &&
        SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
      // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
      VNInfo *DefVNI;
      if (OldIdxOut != LR.begin() &&
          !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
                                     OldIdxOut->start)) {
        // There is no gap between OldIdxOut and its predecessor anymore,
        // merge them.
        LiveRange::iterator IPrev = std::prev(OldIdxOut);
        DefVNI = OldIdxVNI;
        IPrev->end = OldIdxOut->end;
      } else {
        // The value is live-in to OldIdx.
        LiveRange::iterator INext = std::next(OldIdxOut);
        assert(INext != E && "Must have following segment");
        // We merge OldIdxOut and its successor. As we're dealing with subreg
        // reordering, there is always a successor to OldIdxOut in the same BB.
        // We don't need INext->valno anymore and will reuse it for the new
        // segment we create later.
        DefVNI = OldIdxVNI;
        INext->start = OldIdxOut->end;
        INext->valno->def = INext->start;
      }
      // If NewIdx is behind the last segment, extend that and append a new one.
      if (AfterNewIdx == E) {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut -| |- X0 -| ... |- Xn -| end
        // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS -| end
        std::copy(std::next(OldIdxOut), E, OldIdxOut);
        // The last segment is undefined now, reuse it for a dead def.
        LiveRange::iterator NewSegment = std::prev(E);
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         DefVNI);
        DefVNI->def = NewIdxDef;

        LiveRange::iterator Prev = std::prev(NewSegment);
        Prev->end = NewIdxDef;
      } else {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
        // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
        std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
        LiveRange::iterator Prev = std::prev(AfterNewIdx);
        // We have two cases:
        if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
          // Case 1: NewIdx is inside a liverange. Split this liverange at
          // NewIdxDef into the segment "Prev" followed by "NewSegment".
          LiveRange::iterator NewSegment = AfterNewIdx;
          *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
          Prev->valno->def = NewIdxDef;

          *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
          DefVNI->def = Prev->start;
        } else {
          // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
          // turn Prev into a segment from NewIdx to AfterNewIdx->start.
          *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
          DefVNI->def = NewIdxDef;
          assert(DefVNI != AfterNewIdx->valno);
        }
      }
      return;
    }

    if (AfterNewIdx != E &&
        SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
      // There is an existing def at NewIdx. The def at OldIdx is coalesced into
      // that value.
      assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
      LR.removeValNo(OldIdxVNI);
    } else {
      // There was no existing def at NewIdx. We need to create a dead def
      // at NewIdx. Shift segments over the old OldIdxOut segment; this frees
      // up a segment at the place where we want to construct the dead def.
      //    |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
      // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
      assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
      std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
      // We can reuse OldIdxVNI now.
      LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
      VNInfo *NewSegmentVNI = OldIdxVNI;
      NewSegmentVNI->def = NewIdxDef;
      *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                       NewSegmentVNI);
    }
  }

  /// Update LR to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx (NewIdx < OldIdx).
  void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value isn't killed here, then we have no Def at
      // OldIdx, moreover the value must be live at NewIdx so there is nothing
      // to do.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      if (!isKill)
        return;

      // At this point we have to move OldIdxIn->end back to the nearest
      // previous use or (dead-)def but no further than NewIdx.
      SlotIndex DefBeforeOldIdx
        = std::max(OldIdxIn->start.getDeadSlot(),
                   NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
      OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);

      // Did we have a Def at OldIdx? If not we are done now.
      OldIdxOut = std::next(OldIdxIn);
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
      OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();

    // Is there an existing def at NewIdx?
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
      assert(NewIdxOut->valno != OldIdxVNI &&
             "Same value defined more than once?");
      // If OldIdx was a dead def remove it.
      if (!OldIdxDefIsDead) {
        // Remove segment starting at NewIdx and move begin of OldIdxOut to
        // NewIdx so it can take its place.
        OldIdxVNI->def = NewIdxDef;
        OldIdxOut->start = NewIdxDef;
        LR.removeValNo(NewIdxOut->valno);
      } else {
        // Simply remove the dead def at OldIdx.
        LR.removeValNo(OldIdxVNI);
      }
    } else {
      // Previously nothing was live after NewIdx, so all we have to do now is
      // move the begin of OldIdxOut to NewIdx.
      if (!OldIdxDefIsDead) {
        // Do we have any intermediate Defs between OldIdx and NewIdx?
        if (OldIdxIn != E &&
            SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
          // OldIdx is not a dead def and NewIdx is before predecessor start.
          LiveRange::iterator NewIdxIn = NewIdxOut;
          assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
          const SlotIndex SplitPos = NewIdxDef;
          OldIdxVNI = OldIdxIn->valno;

          SlotIndex NewDefEndPoint = std::next(NewIdxIn)->end;
          LiveRange::iterator Prev = std::prev(OldIdxIn);
          if (OldIdxIn != LR.begin() &&
              SlotIndex::isEarlierInstr(NewIdx, Prev->end)) {
            // If the segment before OldIdx read a value defined earlier than
            // NewIdx, the moved instruction also reads and forwards that
            // value. Extend the lifetime of the new def point.

            // Extend to where the previous range started, unless there is
            // another redef first.
            NewDefEndPoint = std::min(OldIdxIn->start,
                                      std::next(NewIdxOut)->start);
          }

          // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
          OldIdxOut->valno->def = OldIdxIn->start;
          *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
                                          OldIdxOut->valno);
          // OldIdxIn and OldIdxVNI are now undef and can be overridden.
          // We slide [NewIdxIn, OldIdxIn) down one position.
          //    |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
          // => |- undef/NewIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
          std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
          // NewIdxIn is now considered undef so we can reuse it for the moved
          // value.
          LiveRange::iterator NewSegment = NewIdxIn;
          LiveRange::iterator Next = std::next(NewSegment);
          if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
            // There is no gap between NewSegment and its predecessor.
            *NewSegment = LiveRange::Segment(Next->start, SplitPos,
                                             Next->valno);

            *Next = LiveRange::Segment(SplitPos, NewDefEndPoint, OldIdxVNI);
            Next->valno->def = SplitPos;
          } else {
            // There is a gap between NewSegment and its predecessor; the
            // value becomes live-in.
            *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
            NewSegment->valno->def = SplitPos;
          }
        } else {
          // No intermediate defs: move the start of the live def up to NewIdx
          // and leave its end point alone.
          OldIdxOut->start = NewIdxDef;
          OldIdxVNI->def = NewIdxDef;
          if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
            OldIdxIn->end = NewIdx.getRegSlot();
        }
      } else if (OldIdxIn != E &&
                 SlotIndex::isEarlierInstr(NewIdxOut->start, NewIdx) &&
                 SlotIndex::isEarlierInstr(NewIdx, NewIdxOut->end)) {
        // OldIdxVNI is a dead def that has been moved into the middle of
        // another value in LR. That can happen when LR is a whole register,
        // but the dead def is a write to a subreg that is dead at NewIdx.
        // The dead def may have been moved across other values
        // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
        // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next -|
        // => |- X0/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
        std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
        // Modify the segment at NewIdxOut and the following segment to meet at
        // the point of the dead def, with the following segment getting
        // OldIdxVNI as its value number.
        *NewIdxOut = LiveRange::Segment(
            NewIdxOut->start, NewIdxDef.getRegSlot(), NewIdxOut->valno);
        *(NewIdxOut + 1) = LiveRange::Segment(
            NewIdxDef.getRegSlot(), (NewIdxOut + 1)->end, OldIdxVNI);
        OldIdxVNI->def = NewIdxDef;
        // Modify subsequent segments to be defined by the moved def OldIdxVNI.
        for (auto Idx = NewIdxOut + 2; Idx <= OldIdxOut; ++Idx)
          Idx->valno = OldIdxVNI;
        // Aggressively remove all dead flags from the former dead definition.
        // Kill/dead flags shouldn't be used while live intervals exist; they
        // will be reinserted by VirtRegRewriter.
        if (MachineInstr *KillMI = LIS.getInstructionFromIndex(NewIdx))
          for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
            if (MO->isReg() && !MO->isUse())
              MO->setIsDead(false);
      } else {
        // OldIdxVNI is a dead def. It may have been moved across other values
        // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
        // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next -|
        // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
        std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
        // OldIdxVNI can be reused now to build a new dead def segment.
        LiveRange::iterator NewSegment = NewIdxOut;
        VNInfo *NewSegmentVNI = OldIdxVNI;
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         NewSegmentVNI);
        NewSegmentVNI->def = NewIdxDef;
      }
    }
  }

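  // Move the RegMaskSlots entry for the moved instruction from OldIdx to
  // NewIdx. The slots must remain sorted, so the instruction must not have
  // been moved past another regmask-clobbering instruction (e.g. a call).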
  void updateRegMaskSlots() {
    SmallVectorImpl<SlotIndex>::iterator RI =
        llvm::lower_bound(LIS.RegMaskSlots, OldIdx);
    assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
           "No RegMask at OldIdx.");
    *RI = NewIdx.getRegSlot();
    assert((RI == LIS.RegMaskSlots.begin() ||
            SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
           "Cannot move regmask instruction above another call");
    assert((std::next(RI) == LIS.RegMaskSlots.end() ||
            SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
           "Cannot move regmask instruction below another call");
  }

  // Return the last use of Reg between Before and OldIdx, or Before if there
  // is no such use.
  SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
                              LaneBitmask LaneMask) {
    if (Register::isVirtualRegister(Reg)) {
      SlotIndex LastUse = Before;
      for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
        if (MO.isUndef())
          continue;
        unsigned SubReg = MO.getSubReg();
        if (SubReg != 0 && LaneMask.any() &&
            (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
          continue;

        const MachineInstr &MI = *MO.getParent();
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot.getRegSlot();
      }
      return LastUse;
    }

    // This is a regunit interval, so scanning the use list could be very
    // expensive. Scan upwards from OldIdx instead.
    assert(Before < OldIdx && "Expected upwards move");
    SlotIndexes *Indexes = LIS.getSlotIndexes();
    MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);

    // OldIdx may not correspond to an instruction any longer, so set MII to
    // point to the next instruction after OldIdx, or MBB->end().
    MachineBasicBlock::iterator MII = MBB->end();
    if (MachineInstr *MI = Indexes->getInstructionFromIndex(
                           Indexes->getNextNonNullIndex(OldIdx)))
      if (MI->getParent() == MBB)
        MII = MI;

    MachineBasicBlock::iterator Begin = MBB->begin();
    while (MII != Begin) {
      if ((--MII)->isDebugInstr())
        continue;
      SlotIndex Idx = Indexes->getInstructionIndex(*MII);

      // Stop searching when Before is reached.
      if (!SlotIndex::isEarlierInstr(Before, Idx))
        return Before;

      // Check if MII uses Reg.
      for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
        if (MO->isReg() && !MO->isUndef() &&
            Register::isPhysicalRegister(MO->getReg()) &&
            TRI.hasRegUnit(MO->getReg(), Reg))
          return Idx.getRegSlot();
    }
    // No use was found and Before was never reached: Before is earlier than
    // the first (non-debug) instruction of the block, so return it unchanged.
    return Before;
  }
};

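// Typical usage (a sketch, not a specific in-tree caller): a pass first moves
// the instruction with MachineBasicBlock::splice() and then notifies the
// analysis, e.g.
//   MBB.splice(InsertPos, &MBB, MI.getIterator());
//   LIS->handleMove(MI);
// The instruction must already sit at its final position and must not have
// crossed a basic block boundary.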
void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
  // It is fine to move a bundle as a whole, but not an individual instruction
  // inside it.
  assert((!MI.isBundled() || MI.getOpcode() == TargetOpcode::BUNDLE) &&
         "Cannot move instruction in bundle");
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  Indexes->removeMachineInstrFromMaps(MI);
  SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
  assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
         OldIndex < getMBBEndIdx(MI.getParent()) &&
         "Cannot handle moves across basic block boundaries.");

  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(&MI);
}

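// Update intervals for an instruction that has been moved into the bundle
// headed by BundleStart: the bundle header's index becomes the instruction's
// new index, and its ranges are updated as if it had moved there.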
void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
                                         MachineInstr &BundleStart,
                                         bool UpdateFlags) {
  SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
  SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
  HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
  HME.updateAllRanges(&MI);
}

void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
                                        const MachineBasicBlock::iterator End,
                                        const SlotIndex endIdx,
                                        LiveRange &LR, const unsigned Reg,
                                        LaneBitmask LaneMask) {
  LiveInterval::iterator LII = LR.find(endIdx);
  SlotIndex lastUseIdx;
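  // While walking backwards, lastUseIdx records where the segment currently
  // being rebuilt ends: the latest use of Reg seen so far. While it is
  // invalid, a def found in the walk is treated as dead.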
  if (LII == LR.begin()) {
    // This happens when the function is called for a subregister that only
    // occurs _after_ the range that is to be repaired.
    return;
  }
  if (LII != LR.end() && LII->start < endIdx)
    lastUseIdx = LII->end;
  else
    --LII;

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugInstr())
      continue;

    SlotIndex instrIdx = getInstructionIndex(MI);
    bool isStartValid = getInstructionFromIndex(LII->start);
    bool isEndValid = getInstructionFromIndex(LII->end);

    // FIXME: This doesn't currently handle early-clobber or multiple removed
    // defs inside of the region to repair.
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;

      unsigned SubReg = MO.getSubReg();
      LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((Mask & LaneMask).none())
        continue;

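      // A def re-anchors or recreates the segment that starts here; a use
      // only pins the segment's end point and lastUseIdx.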
      if (MO.isDef()) {
        if (!isStartValid) {
          if (LII->end.isDead()) {
            SlotIndex prevStart;
            if (LII != LR.begin())
              prevStart = std::prev(LII)->start;

            // FIXME: This could be more efficient if there was a
            // removeSegment method that returned an iterator.
            LR.removeSegment(*LII, true);
            if (prevStart.isValid())
              LII = LR.find(prevStart);
            else
              LII = LR.begin();
          } else {
            LII->start = instrIdx.getRegSlot();
            LII->valno->def = instrIdx.getRegSlot();
            if (MO.getSubReg() && !MO.isUndef())
              lastUseIdx = instrIdx.getRegSlot();
            else
              lastUseIdx = SlotIndex();
            continue;
          }
        }

        if (!lastUseIdx.isValid()) {
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(),
                               instrIdx.getDeadSlot(), VNI);
          LII = LR.addSegment(S);
        } else if (LII->start != instrIdx.getRegSlot()) {
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
          LII = LR.addSegment(S);
        }

        if (MO.getSubReg() && !MO.isUndef())
          lastUseIdx = instrIdx.getRegSlot();
        else
          lastUseIdx = SlotIndex();
      } else if (MO.isUse()) {
        // FIXME: This should probably be handled outside of this branch,
        // either as part of the def case (for defs inside of the region) or
        // after the loop over the region.
        if (!isEndValid && !LII->end.isBlock())
          LII->end = instrIdx.getRegSlot();
        if (!lastUseIdx.isValid())
          lastUseIdx = instrIdx.getRegSlot();
      }
    }
  }
}

void
LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator Begin,
                                      MachineBasicBlock::iterator End,
                                      ArrayRef<unsigned> OrigRegs) {
  // Find anchor points, which are at the beginning/end of blocks or at
  // instructions that already have indexes.
  while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
    --Begin;
  while (End != MBB->end() && !Indexes->hasIndex(*End))
    ++End;

  SlotIndex endIdx;
  if (End == MBB->end())
    endIdx = getMBBEndIdx(MBB).getPrevSlot();
  else
    endIdx = getInstructionIndex(*End);
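  // endIdx bounds the region being repaired from above: either the index of
  // the anchor instruction at End or the last slot of the block.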

  Indexes->repairIndexesInRange(MBB, Begin, End);

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugInstr())
      continue;
    for (const MachineOperand &MO : MI.operands()) {
      if (MO.isReg() && Register::isVirtualRegister(MO.getReg()) &&
          !hasInterval(MO.getReg()))
        createAndComputeVirtRegInterval(MO.getReg());
    }
  }

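  // Every virtual register used in the region now has a live interval;
  // repair the intervals of the registers the caller reported as modified.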
  for (unsigned Reg : OrigRegs) {
    if (!Register::isVirtualRegister(Reg))
      continue;

    LiveInterval &LI = getInterval(Reg);
    // FIXME: Should we support undefs that gain defs?
    if (!LI.hasAtLeastOneValue())
      continue;

    for (LiveInterval::SubRange &S : LI.subranges())
      repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);

    repairOldRegInRange(Begin, End, endIdx, LI, Reg);
  }
}

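// Remove any value defined at Pos from the live ranges of the register units
// of the physical register Reg. Only regunit ranges that have already been
// computed (cached) are affected.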
void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
  for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
    if (LiveRange *LR = getCachedRegUnit(*Unit))
      if (VNInfo *VNI = LR->getVNInfoAt(Pos))
        LR->removeValNo(VNI);
  }
}

void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
  // LI may not have the main range computed yet, but its subranges may
  // be present.
  VNInfo *VNI = LI.getVNInfoAt(Pos);
  if (VNI != nullptr) {
    assert(VNI->def.getBaseIndex() == Pos.getBaseIndex());
    LI.removeValNo(VNI);
  }

  // Also remove the value defined in subranges.
  for (LiveInterval::SubRange &S : LI.subranges()) {
    if (VNInfo *SVNI = S.getVNInfoAt(Pos))
      if (SVNI->def.getBaseIndex() == Pos.getBaseIndex())
        S.removeValNo(SVNI);
  }
  LI.removeEmptySubRanges();
}

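// Split LI into its connected components: the original interval keeps the
// first component, and each additional component is moved to a newly created
// virtual register, which is appended to SplitLIs.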
void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
    SmallVectorImpl<LiveInterval*> &SplitLIs) {
  ConnectedVNInfoEqClasses ConEQ(*this);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp <= 1)
    return;
  LLVM_DEBUG(dbgs() << "  Split " << NumComp << " components: " << LI << '\n');
  unsigned Reg = LI.reg;
  const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
  for (unsigned I = 1; I < NumComp; ++I) {
    Register NewVReg = MRI->createVirtualRegister(RegClass);
    LiveInterval &NewLI = createEmptyInterval(NewVReg);
    SplitLIs.push_back(&NewLI);
  }
  ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
}

void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  LRCalc->constructMainRangeFromSubranges(LI);
}